author     Ian Lance Taylor <ian@gcc.gnu.org>   2019-09-12 23:22:53 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2019-09-12 23:22:53 +0000
commit     656297e1fec9a127ff742df16958ee279ccacec5 (patch)
tree       24347a35dacea36ce742c32c17420f3e31f17e3d /libgo/go/runtime
parent     d6ecb707cc5a58816d27908a7aa324c4b0bc67bb (diff)
libgo: update to Go1.13
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/194698
From-SVN: r275691
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/cpuprof.go         27
-rw-r--r--  libgo/go/runtime/export_test.go     34
-rw-r--r--  libgo/go/runtime/malloc.go          53
-rw-r--r--  libgo/go/runtime/mcache.go           2
-rw-r--r--  libgo/go/runtime/mgcscavenge.go      8
-rw-r--r--  libgo/go/runtime/mheap.go           18
-rw-r--r--  libgo/go/runtime/panic.go           11
-rw-r--r--  libgo/go/runtime/pprof/runtime.go    2
-rw-r--r--  libgo/go/runtime/proc.go             7
-rw-r--r--  libgo/go/runtime/proc_test.go        4
-rw-r--r--  libgo/go/runtime/sigqueue.go        12
-rw-r--r--  libgo/go/runtime/sigqueue_note.go   25
12 files changed, 165 insertions, 38 deletions
diff --git a/libgo/go/runtime/cpuprof.go b/libgo/go/runtime/cpuprof.go
index e49625b..d395210 100644
--- a/libgo/go/runtime/cpuprof.go
+++ b/libgo/go/runtime/cpuprof.go
@@ -36,9 +36,10 @@ type cpuProfile struct {
     // 300 words per second.
     // Hopefully a normal Go thread will get the profiling
     // signal at least once every few seconds.
-    extra     [1000]uintptr
-    numExtra  int
-    lostExtra uint64 // count of frames lost because extra is full
+    extra      [1000]uintptr
+    numExtra   int
+    lostExtra  uint64 // count of frames lost because extra is full
+    lostAtomic uint64 // count of frames lost because of being in atomic64 on mips/arm; updated racily
 }
 
 var cpuprof cpuProfile
@@ -94,7 +95,7 @@ func (p *cpuProfile) add(gp *g, stk []uintptr) {
     }
 
     if prof.hz != 0 { // implies cpuprof.log != nil
-        if p.numExtra > 0 || p.lostExtra > 0 {
+        if p.numExtra > 0 || p.lostExtra > 0 || p.lostAtomic > 0 {
             p.addExtra()
         }
         hdr := [1]uint64{1}
@@ -159,18 +160,20 @@ func (p *cpuProfile) addExtra() {
             _LostExternalCodePC + sys.PCQuantum,
             _ExternalCodePC + sys.PCQuantum,
         }
-        cpuprof.log.write(nil, 0, hdr[:], lostStk[:])
+        p.log.write(nil, 0, hdr[:], lostStk[:])
         p.lostExtra = 0
     }
-}
 
-func (p *cpuProfile) addLostAtomic64(count uint64) {
-    hdr := [1]uint64{count}
-    lostStk := [2]uintptr{
-        _LostSIGPROFDuringAtomic64PC + sys.PCQuantum,
-        _SystemPC + sys.PCQuantum,
+    if p.lostAtomic > 0 {
+        hdr := [1]uint64{p.lostAtomic}
+        lostStk := [2]uintptr{
+            _LostSIGPROFDuringAtomic64PC + sys.PCQuantum,
+            _SystemPC + sys.PCQuantum,
+        }
+        p.log.write(nil, 0, hdr[:], lostStk[:])
+        p.lostAtomic = 0
     }
-    cpuprof.log.write(nil, 0, hdr[:], lostStk[:])
+
 }
 
 // CPUProfile panics.
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 0db2393..10890d3 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -675,3 +675,37 @@ func (t *Treap) CheckInvariants() {
     t.mTreap.treap.walkTreap(checkTreapNode)
     t.mTreap.treap.validateInvariants()
 }
+
+func RunGetgThreadSwitchTest() {
+    // Test that getg works correctly with thread switch.
+    // With gccgo, if we generate getg inlined, the backend
+    // may cache the address of the TLS variable, which
+    // will become invalid after a thread switch. This test
+    // checks that the bad caching doesn't happen.
+
+    ch := make(chan int)
+    go func(ch chan int) {
+        ch <- 5
+        LockOSThread()
+    }(ch)
+
+    g1 := getg()
+
+    // Block on a receive. This is likely to get us a thread
+    // switch. If we yield to the sender goroutine, it will
+    // lock the thread, forcing us to resume on a different
+    // thread.
+    <-ch
+
+    g2 := getg()
+    if g1 != g2 {
+        panic("g1 != g2")
+    }
+
+    // Also test getg after some control flow, as the
+    // backend is sensitive to control flow.
+    g3 := getg()
+    if g1 != g3 {
+        panic("g1 != g3")
+    }
+}
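The new RunGetgThreadSwitchTest above relies on a scheduling detail: a goroutine that blocks on a channel receive can be resumed on a different OS thread once the sender has pinned the original thread with LockOSThread. The following standalone sketch is not part of this commit; it is Linux-only because it uses syscall.Gettid, and all names in it are local to the sketch. It only makes the possible thread migration visible from ordinary user code.

```go
package main

import (
    "fmt"
    "runtime"
    "syscall"
)

func main() {
    ch := make(chan int)
    go func() {
        ch <- 5
        runtime.LockOSThread() // pin the sender to whatever thread it is on now
    }()

    before := syscall.Gettid()
    <-ch // blocking here may hand our OS thread to the sender goroutine
    after := syscall.Gettid()

    fmt.Printf("tid before=%d after=%d (a change is possible, not guaranteed)\n", before, after)
}
```

A thread change is possible rather than guaranteed, which is why the runtime test checks that getg() returns the same g before and after the receive instead of checking thread identity.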
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index cee5f6b..0eee55e 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -335,12 +335,21 @@ const (
 var physPageSize uintptr
 
 // physHugePageSize is the size in bytes of the OS's default physical huge
-// page size whose allocation is opaque to the application.
+// page size whose allocation is opaque to the application. It is assumed
+// and verified to be a power of two.
 //
 // If set, this must be set by the OS init code (typically in osinit) before
 // mallocinit. However, setting it at all is optional, and leaving the default
 // value is always safe (though potentially less efficient).
-var physHugePageSize uintptr
+//
+// Since physHugePageSize is always assumed to be a power of two,
+// physHugePageShift is defined as physHugePageSize == 1 << physHugePageShift.
+// The purpose of physHugePageShift is to avoid doing divisions in
+// performance critical functions.
+var (
+    physHugePageSize  uintptr
+    physHugePageShift uint
+)
 
 // OS memory management abstraction layer
 //
@@ -443,6 +452,17 @@ func mallocinit() {
         print("system page size (", physPageSize, ") must be a power of 2\n")
         throw("bad system page size")
     }
+    if physHugePageSize&(physHugePageSize-1) != 0 {
+        print("system huge page size (", physHugePageSize, ") must be a power of 2\n")
+        throw("bad system huge page size")
+    }
+    if physHugePageSize != 0 {
+        // Since physHugePageSize is a power of 2, it suffices to increase
+        // physHugePageShift until 1<<physHugePageShift == physHugePageSize.
+        for 1<<physHugePageShift != physHugePageSize {
+            physHugePageShift++
+        }
+    }
 
     // Initialize the heap.
     mheap_.init()
@@ -877,7 +897,22 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
     if debug.sbrk != 0 {
         align := uintptr(16)
         if typ != nil {
-            align = uintptr(typ.align)
+            // TODO(austin): This should be just
+            //   align = uintptr(typ.align)
+            // but that's only 4 on 32-bit platforms,
+            // even if there's a uint64 field in typ (see #599).
+            // This causes 64-bit atomic accesses to panic.
+            // Hence, we use stricter alignment that matches
+            // the normal allocator better.
+            if size&7 == 0 {
+                align = 8
+            } else if size&3 == 0 {
+                align = 4
+            } else if size&1 == 0 {
+                align = 2
+            } else {
+                align = 1
+            }
         }
         return persistentalloc(size, align, &memstats.other_sys)
     }
@@ -1076,8 +1111,8 @@
     }
 
     if rate := MemProfileRate; rate > 0 {
-        if rate != 1 && int32(size) < c.next_sample {
-            c.next_sample -= int32(size)
+        if rate != 1 && size < c.next_sample {
+            c.next_sample -= size
         } else {
             mp := acquirem()
             profilealloc(mp, x, size)
@@ -1180,7 +1215,7 @@
 // processes, the distance between two samples follows the exponential
 // distribution (exp(MemProfileRate)), so the best return value is a random
 // number taken from an exponential distribution whose mean is MemProfileRate.
-func nextSample() int32 {
+func nextSample() uintptr {
     if GOOS == "plan9" {
         // Plan 9 doesn't support floating point in note handler.
         if g := getg(); g == g.m.gsignal {
@@ -1188,7 +1223,7 @@
         }
     }
 
-    return fastexprand(MemProfileRate)
+    return uintptr(fastexprand(MemProfileRate))
 }
 
 // fastexprand returns a random number from an exponential distribution with
@@ -1223,14 +1258,14 @@
 
 // nextSampleNoFP is similar to nextSample, but uses older,
 // simpler code to avoid floating point.
-func nextSampleNoFP() int32 {
+func nextSampleNoFP() uintptr {
     // Set first allocation sample size.
     rate := MemProfileRate
     if rate > 0x3fffffff { // make 2*rate not overflow
         rate = 0x3fffffff
     }
     if rate != 0 {
-        return int32(fastrand() % uint32(2*rate))
+        return uintptr(fastrand() % uint32(2*rate))
     }
     return 0
 }
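The physHugePageSize/physHugePageShift comment above gives the rationale: once the huge page size is known to be a power of two, a shift derived once at startup can replace division in performance-critical paths. A minimal standalone sketch of that idea (the names and the 2 MiB huge page are assumptions of the sketch, not the runtime's):

```go
package main

import "fmt"

// shiftFor returns s such that size == 1<<s, assuming size is a power of two,
// using the same kind of loop mallocinit uses for physHugePageShift.
func shiftFor(size uintptr) uint {
    var s uint
    for 1<<s != size {
        s++
    }
    return s
}

func main() {
    const hugePageSize = 2 << 20 // assume a 2 MiB huge page
    shift := shiftFor(hugePageSize)

    total := uintptr(13<<20 + 4096)
    fmt.Println(shift)                              // 21
    fmt.Println(total/hugePageSize == total>>shift) // true: the shift replaces the division
}
```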
diff --git a/libgo/go/runtime/mcache.go b/libgo/go/runtime/mcache.go
index ca92682..27328e1 100644
--- a/libgo/go/runtime/mcache.go
+++ b/libgo/go/runtime/mcache.go
@@ -19,7 +19,7 @@ import (
 type mcache struct {
     // The following members are accessed on every malloc,
     // so they are grouped here for better caching.
-    next_sample int32   // trigger heap sample after allocating this many bytes
+    next_sample uintptr // trigger heap sample after allocating this many bytes
     local_scan  uintptr // bytes of scannable heap allocated
 
     // Allocator cache for tiny objects w/o pointers.
diff --git a/libgo/go/runtime/mgcscavenge.go b/libgo/go/runtime/mgcscavenge.go
index 910c123..9f8c472 100644
--- a/libgo/go/runtime/mgcscavenge.go
+++ b/libgo/go/runtime/mgcscavenge.go
@@ -130,7 +130,7 @@ func gcPaceScavenger() {
     if physHugePageSize != 0 {
         // Start by computing the amount of free memory we have in huge pages
         // in total. Trivially, this is all the huge page work we need to do.
-        hugeWork := uint64(mheap_.free.unscavHugePages * physHugePageSize)
+        hugeWork := uint64(mheap_.free.unscavHugePages) << physHugePageShift
 
         // ...but it could turn out that there's more huge work to do than
         // total work, so cap it at total work. This might happen for very large
@@ -138,14 +138,14 @@
         // that there are free chunks of memory larger than a huge page that we don't want
         // to scavenge.
         if hugeWork >= totalWork {
-            hugePages := totalWork / uint64(physHugePageSize)
-            hugeWork = hugePages * uint64(physHugePageSize)
+            hugePages := totalWork >> physHugePageShift
+            hugeWork = hugePages << physHugePageShift
         }
         // Everything that's not huge work is regular work. At this point we
         // know huge work so we can calculate how much time that will take
         // based on scavengePageRate (which applies to pages of any size).
         regularWork = totalWork - hugeWork
-        hugeTime = hugeWork / uint64(physHugePageSize) * scavengeHugePagePeriod
+        hugeTime = (hugeWork >> physHugePageShift) * scavengeHugePagePeriod
     }
     // Finally, we can compute how much time it'll take to do the regular work
     // and the total time to do all the work.
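The gcPaceScavenger lines above cap hugeWork by rounding totalWork down to a whole number of huge pages using the new shift. A small standalone sketch (local names, 2 MiB huge page assumed, not from the patch) showing that the shift-down/shift-up pair is the usual power-of-two rounding, equivalent to masking off the low bits:

```go
package main

import "fmt"

func main() {
    const hugePageSize = 2 << 20 // assumed 2 MiB huge page
    const shift = 21             // hugePageSize == 1 << shift

    totalWork := uint64(5<<20 + 12345)

    hugePages := totalWork >> shift // whole huge pages worth of work
    hugeWork := hugePages << shift  // totalWork rounded down to a huge-page multiple

    fmt.Println(hugePages)                               // 2
    fmt.Println(hugeWork == totalWork&^(hugePageSize-1)) // true
}
```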
diff --git a/libgo/go/runtime/mheap.go b/libgo/go/runtime/mheap.go
index f18bf9b..cd01b3f 100644
--- a/libgo/go/runtime/mheap.go
+++ b/libgo/go/runtime/mheap.go
@@ -514,11 +514,13 @@ func (h *mheap) coalesce(s *mspan) {
         h.free.insert(other)
     }
 
-    hpBefore := s.hugePages()
+    hpMiddle := s.hugePages()
 
     // Coalesce with earlier, later spans.
+    var hpBefore uintptr
     if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
         if s.scavenged == before.scavenged {
+            hpBefore = before.hugePages()
             merge(before, s, before)
         } else {
             realign(before, s, before)
@@ -526,23 +528,29 @@
     }
 
     // Now check to see if next (greater addresses) span is free and can be coalesced.
+    var hpAfter uintptr
     if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
         if s.scavenged == after.scavenged {
+            hpAfter = after.hugePages()
             merge(s, after, after)
         } else {
             realign(s, after, after)
         }
     }
-
-    if !s.scavenged && s.hugePages() > hpBefore {
+    if !s.scavenged && s.hugePages() > hpBefore+hpMiddle+hpAfter {
         // If s has grown such that it now may contain more huge pages than it
-        // did before, then mark the whole region as huge-page-backable.
+        // and its now-coalesced neighbors did before, then mark the whole region
+        // as huge-page-backable.
         //
         // Otherwise, on systems where we break up huge pages (like Linux)
         // s may not be backed by huge pages because it could be made up of
         // pieces which are broken up in the underlying VMA. The primary issue
         // with this is that it can lead to a poor estimate of the amount of
         // free memory backed by huge pages for determining the scavenging rate.
+        //
+        // TODO(mknyszek): Measure the performance characteristics of sysHugePage
+        // and determine whether it makes sense to only sysHugePage on the pages
+        // that matter, or if it's better to just mark the whole region.
         sysHugePage(unsafe.Pointer(s.base()), s.npages*pageSize)
     }
 }
@@ -561,7 +569,7 @@ func (s *mspan) hugePages() uintptr {
         end &^= physHugePageSize - 1
     }
     if start < end {
-        return (end - start) / physHugePageSize
+        return (end - start) >> physHugePageShift
     }
     return 0
 }
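mspan.hugePages, whose return statement is changed above, counts only the aligned huge pages that lie entirely inside a span. A standalone sketch of that alignment arithmetic (the function name and the 2 MiB size are assumptions of the sketch, not runtime API):

```go
package main

import "fmt"

const (
    hugePageSize  = uintptr(2 << 20) // assumed 2 MiB huge page
    hugePageShift = 21               // hugePageSize == 1 << hugePageShift
)

// hugePagesIn reports how many whole, aligned huge pages fit in [start, end).
func hugePagesIn(start, end uintptr) uintptr {
    start = (start + hugePageSize - 1) &^ (hugePageSize - 1) // round start up
    end &^= hugePageSize - 1                                 // round end down
    if start < end {
        return (end - start) >> hugePageShift
    }
    return 0
}

func main() {
    // A 7 MiB range starting 1 MiB past a huge-page boundary fully covers
    // three aligned 2 MiB huge pages.
    start := uintptr(64<<20 + 1<<20)
    end := start + 7<<20
    fmt.Println(hugePagesIn(start, end)) // 3
}
```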
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 21ffb5c..2a11f932 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -61,13 +61,24 @@ func panicCheck1(pc uintptr, msg string) {
 }
 
 // Same as above, but calling from the runtime is allowed.
+//
+// Using this function is necessary for any panic that may be
+// generated by runtime.sigpanic, since those are always called by the
+// runtime.
 func panicCheck2(err string) {
+    // panic allocates, so to avoid recursive malloc, turn panics
+    // during malloc into throws.
     gp := getg()
     if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
         throw(err)
     }
 }
 
+// Many of the following panic entry-points turn into throws when they
+// happen in various runtime contexts. These should never happen in
+// the runtime, and if they do, they indicate a serious issue and
+// should not be caught by user code.
+//
 // The panic{Index,Slice,divide,shift} functions are called by
 // code generated by the compiler for out of bounds index expressions,
 // out of bounds slice expressions, division by zero, and shift by negative.
diff --git a/libgo/go/runtime/pprof/runtime.go b/libgo/go/runtime/pprof/runtime.go
index e6aace8..b71bbad 100644
--- a/libgo/go/runtime/pprof/runtime.go
+++ b/libgo/go/runtime/pprof/runtime.go
@@ -16,6 +16,7 @@ func runtime_setProfLabel(labels unsafe.Pointer)
 func runtime_getProfLabel() unsafe.Pointer
 
 // SetGoroutineLabels sets the current goroutine's labels to match ctx.
+// A new goroutine inherits the labels of the goroutine that created it.
 // This is a lower-level API than Do, which should be used instead when possible.
 func SetGoroutineLabels(ctx context.Context) {
     ctxLabels, _ := ctx.Value(labelContextKey{}).(*labelMap)
@@ -24,6 +25,7 @@ func SetGoroutineLabels(ctx context.Context) {
 
 // Do calls f with a copy of the parent context with the
 // given labels added to the parent's label map.
+// Goroutines spawned while executing f will inherit the augmented label-set.
 // Each key/value pair in labels is inserted into the label map in the
 // order provided, overriding any previous value for the same key.
 // The augmented label map will be set for the duration of the call to f
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index afedad5..a0147cf 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -3505,9 +3505,6 @@ func _GC() { _GC() }
 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
 func _VDSO() { _VDSO() }
 
-// Counts SIGPROFs received while in atomic64 critical section, on mips{,le}
-var lostAtomic64Count uint64
-
 var _SystemPC = funcPC(_System)
 var _ExternalCodePC = funcPC(_ExternalCode)
 var _LostExternalCodePC = funcPC(_LostExternalCode)
@@ -3598,10 +3595,6 @@ func sigprof(pc uintptr, gp *g, mp *m) {
     }
 
     if prof.hz != 0 {
-        if (GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm") && lostAtomic64Count > 0 {
-            cpuprof.addLostAtomic64(lostAtomic64Count)
-            lostAtomic64Count = 0
-        }
         cpuprof.add(gp, stk[:n])
     }
     getg().m.mallocing--
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index b9be338..fee03be 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -984,3 +984,7 @@ func TestPreemptionAfterSyscall(t *testing.T) {
         })
     }
 }
+
+func TestGetgThreadSwitch(t *testing.T) {
+    runtime.RunGetgThreadSwitchTest()
+}
diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go
index 1a29b20..2070464 100644
--- a/libgo/go/runtime/sigqueue.go
+++ b/libgo/go/runtime/sigqueue.go
@@ -105,6 +105,10 @@ Send:
             break Send
         case sigReceiving:
             if atomic.Cas(&sig.state, sigReceiving, sigIdle) {
+                if GOOS == "darwin" {
+                    sigNoteWakeup(&sig.note)
+                    break Send
+                }
                 notewakeup(&sig.note)
                 break Send
             }
@@ -136,6 +140,10 @@ func signal_recv() uint32 {
             throw("signal_recv: inconsistent state")
         case sigIdle:
             if atomic.Cas(&sig.state, sigIdle, sigReceiving) {
+                if GOOS == "darwin" {
+                    sigNoteSleep(&sig.note)
+                    break Receive
+                }
                 notetsleepg(&sig.note, -1)
                 noteclear(&sig.note)
                 break Receive
@@ -188,6 +196,10 @@ func signal_enable(s uint32) {
         // to use for initialization. It does not pass
         // signal information in m.
         sig.inuse = true // enable reception of signals; cannot disable
+        if GOOS == "darwin" {
+            sigNoteSetup(&sig.note)
+            return
+        }
         noteclear(&sig.note)
         return
     }
diff --git a/libgo/go/runtime/sigqueue_note.go b/libgo/go/runtime/sigqueue_note.go
new file mode 100644
index 0000000..16aeeb2
--- /dev/null
+++ b/libgo/go/runtime/sigqueue_note.go
@@ -0,0 +1,25 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The current implementation of notes on Darwin is not async-signal-safe,
+// so on Darwin the sigqueue code uses different functions to wake up the
+// signal_recv thread. This file holds the non-Darwin implementations of
+// those functions. These functions will never be called.
+
+// +build !darwin
+// +build !plan9
+
+package runtime
+
+func sigNoteSetup(*note) {
+    throw("sigNoteSetup")
+}
+
+func sigNoteSleep(*note) {
+    throw("sigNoteSleep")
+}
+
+func sigNoteWakeup(*note) {
+    throw("sigNoteWakeup")
+}
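The runtime/pprof documentation added in this commit spells out that a goroutine inherits the labels of the goroutine that created it. A small usage sketch of that behavior with the public API (not part of the patch):

```go
package main

import (
    "context"
    "fmt"
    "runtime/pprof"
)

func main() {
    pprof.Do(context.Background(), pprof.Labels("worker", "w1"), func(ctx context.Context) {
        done := make(chan struct{})
        go func() {
            defer close(done)
            // Profile samples taken while this goroutine runs are tagged
            // worker=w1 because it inherits its creator's label set.
            if v, ok := pprof.Label(ctx, "worker"); ok {
                fmt.Println("label from ctx:", v)
            }
        }()
        <-done
    })
}
```

Profile samples attributed to the spawned goroutine carry worker=w1; reading the label back through the context is just a convenient way to observe it from code.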