author     Ian Lance Taylor <ian@gcc.gnu.org>   2019-08-19 20:49:42 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2019-08-19 20:49:42 +0000
commit     b74c00bd10bc789bc2fc4d56b02e25bb431611f2 (patch)
tree       1945ba0d73dd9ecc9361beaaa2fa056653e3bce8 /libgo
parent     7731e84e7ec203bea2bf07a273354b78ff853ba0 (diff)
parent     4f6bdb08bab64b973e465fb45deb751561e3b969 (diff)
Merge from trunk revision 274678.
From-SVN: r274681
Diffstat (limited to 'libgo')
-rw-r--r--   libgo/go/runtime/mcentral.go     9
-rw-r--r--   libgo/go/runtime/mgcmark.go      5
-rw-r--r--   libgo/go/runtime/mgcsweep.go    38
-rw-r--r--   libgo/go/runtime/mwbbuf.go       9
-rw-r--r--   libgo/go/runtime/panic.go       38
-rw-r--r--   libgo/go/runtime/runtime1.go    19
-rw-r--r--   libgo/go/runtime/runtime2.go     9
-rw-r--r--   libgo/go/runtime/stack_test.go  62
8 files changed, 134 insertions, 55 deletions
diff --git a/libgo/go/runtime/mcentral.go b/libgo/go/runtime/mcentral.go
index 0196ba4..a60eb9f 100644
--- a/libgo/go/runtime/mcentral.go
+++ b/libgo/go/runtime/mcentral.go
@@ -56,15 +56,6 @@ retry:
 			c.empty.insertBack(s)
 			unlock(&c.lock)
 			s.sweep(true)
-
-			// With gccgo's conservative GC, the returned span may
-			// now be full. See the comments in mspan.sweep.
-			if uintptr(s.allocCount) == s.nelems {
-				s.freeindex = s.nelems
-				lock(&c.lock)
-				goto retry
-			}
-
 			goto havespan
 		}
 		if s.sweepgen == sg-1 {
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index 1b8a7a3..2463a48 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -657,6 +657,11 @@ func scanstack(gp *g, gcw *gcWork) {
 		scanstackblock(uintptr(unsafe.Pointer(&gp.context)), unsafe.Sizeof(gp.context), gcw)
 	}
 
+	// Note: in the gc runtime scanstack also scans defer records.
+	// This is necessary as it uses stack objects (a.k.a. stack tracing).
+	// We don't (yet) do stack objects, and regular stack/heap scan
+	// will take care of defer records just fine.
+
 	gp.gcscanvalid = true
 }
diff --git a/libgo/go/runtime/mgcsweep.go b/libgo/go/runtime/mgcsweep.go
index bc53de4..539a982 100644
--- a/libgo/go/runtime/mgcsweep.go
+++ b/libgo/go/runtime/mgcsweep.go
@@ -326,39 +326,16 @@ func (s *mspan) sweep(preserve bool) bool {
 		freeToHeap = true
 	}
 	nfreed := s.allocCount - nalloc
-
-	// This check is not reliable with gccgo, because of
-	// conservative stack scanning. The test boils down to
-	// checking that no new bits have been set in gcmarkBits since
-	// the span was added to the sweep count. New bits are set by
-	// greyobject. Seeing a new bit means that a live pointer has
-	// appeared that was not found during the mark phase. That can
-	// not happen when pointers are followed strictly. However,
-	// with conservative checking, it is possible for a pointer
-	// that will never be used to appear live and to cause a mark
-	// to be added. That is unfortunate in that it causes this
-	// check to be inaccurate, and it will keep an object live
-	// unnecessarily, but provided the pointer is not really live
-	// it is not otherwise a problem. So we disable the test for gccgo.
-	nfreedSigned := int(nfreed)
 	if nalloc > s.allocCount {
-		if usestackmaps {
-			print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
-			throw("sweep increased allocation count")
-		}
-
-		// For gccgo, adjust the freed count as a signed number.
-		nfreedSigned = int(s.allocCount) - int(nalloc)
-		if uintptr(nalloc) == s.nelems {
-			s.freeindex = s.nelems
-		}
+		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+		throw("sweep increased allocation count")
 	}
 	s.allocCount = nalloc
 	wasempty := s.nextFreeIndex() == s.nelems
 	s.freeindex = 0 // reset allocation index to start of span.
 	if trace.enabled {
-		getg().m.p.ptr().traceReclaimed += uintptr(nfreedSigned) * s.elemsize
+		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
 	}
 
 	// gcmarkBits becomes the allocBits.
@@ -374,7 +351,7 @@ func (s *mspan) sweep(preserve bool) bool {
 	// But we need to set it before we make the span available for allocation
 	// (return it to heap or mcentral), because allocation code assumes that a
 	// span is already swept if available for allocation.
-	if freeToHeap || nfreedSigned <= 0 {
+	if freeToHeap || nfreed == 0 {
 		// The span must be in our exclusive ownership until we update sweepgen,
 		// check for potential races.
 		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
@@ -387,11 +364,8 @@ func (s *mspan) sweep(preserve bool) bool {
 		atomic.Store(&s.sweepgen, sweepgen)
 	}
 
-	if spc.sizeclass() != 0 {
-		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreedSigned)
-	}
-
-	if nfreedSigned > 0 && spc.sizeclass() != 0 {
+	if nfreed > 0 && spc.sizeclass() != 0 {
+		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
 		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
 		// mcentral.freeSpan updates sweepgen
 	} else if freeToHeap {
diff --git a/libgo/go/runtime/mwbbuf.go b/libgo/go/runtime/mwbbuf.go
index 4c875ff..a27406e 100644
--- a/libgo/go/runtime/mwbbuf.go
+++ b/libgo/go/runtime/mwbbuf.go
@@ -285,10 +285,17 @@ func wbBufFlush1(_p_ *p) {
 			// path to reduce the rate of flushes?
 			continue
 		}
-		obj, span, objIndex := findObject(ptr, 0, 0, false)
+		obj, span, objIndex := findObject(ptr, 0, 0, !usestackmaps)
 		if obj == 0 {
 			continue
 		}
+		if span.isFree(objIndex) {
+			// For gccgo, it is possible that we have a write barrier
+			// writing to unintialized stack memory. So we could see
+			// a bad pointer in the write barrier buffer. Don't mark
+			// it in this case.
+			continue
+		}
 		// TODO: Consider making two passes where the first
 		// just prefetches the mark bits.
 		mbits := span.markBitsForIndex(objIndex)
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 264ad38..88c0a4d 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -13,6 +13,7 @@ import (
 // themselves, so that the compiler will export them.
 //
 //go:linkname deferproc runtime.deferproc
+//go:linkname deferprocStack runtime.deferprocStack
 //go:linkname deferreturn runtime.deferreturn
 //go:linkname setdeferretaddr runtime.setdeferretaddr
 //go:linkname checkdefer runtime.checkdefer
@@ -124,6 +125,38 @@ func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
 	d.makefunccanrecover = false
 }
 
+// deferprocStack queues a new deferred function with a defer record on the stack.
+// The defer record, d, does not need to be initialized.
+// Other arguments are the same as in deferproc.
+//go:nosplit
+func deferprocStack(d *_defer, frame *bool, pfn uintptr, arg unsafe.Pointer) {
+	gp := getg()
+	if gp.m.curg != gp {
+		// go code on the system stack can't defer
+		throw("defer on system stack")
+	}
+	d.pfn = pfn
+	d.retaddr = 0
+	d.makefunccanrecover = false
+	d.heap = false
+	// The lines below implement:
+	//   d.frame = frame
+	//   d.arg = arg
+	//   d._panic = nil
+	//   d.panicStack = gp._panic
+	//   d.link = gp._defer
+	// But without write barriers. They are writes to the stack so they
+	// don't need a write barrier, and furthermore are to uninitialized
+	// memory, so they must not use a write barrier.
+	*(*uintptr)(unsafe.Pointer(&d.frame)) = uintptr(unsafe.Pointer(frame))
+	*(*uintptr)(unsafe.Pointer(&d.arg)) = uintptr(unsafe.Pointer(arg))
+	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
+	*(*uintptr)(unsafe.Pointer(&d.panicStack)) = uintptr(unsafe.Pointer(gp._panic))
+	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
+
+	gp._defer = d
+}
+
 // Allocate a Defer, usually using per-P pool.
 // Each defer must be released with freedefer.
 func newdefer() *_defer {
@@ -155,11 +188,13 @@ func newdefer() *_defer {
 			// Duplicate the tail below so if there's a
 			// crash in checkPut we can tell if d was just
 			// allocated or came from the pool.
+			d.heap = true
 			d.link = gp._defer
 			gp._defer = d
 			return d
 		}
 	}
+	d.heap = true
 	d.link = gp._defer
 	gp._defer = d
 	return d
@@ -179,6 +214,9 @@ func freedefer(d *_defer) {
 	if d.pfn != 0 {
 		freedeferfn()
 	}
+	if !d.heap {
+		return
+	}
 	pp := getg().m.p.ptr()
 	if len(pp.deferpool) == cap(pp.deferpool) {
 		// Transfer half of local cache to the central cache.
diff --git a/libgo/go/runtime/runtime1.go b/libgo/go/runtime/runtime1.go
index e2567b3..2424f20 100644
--- a/libgo/go/runtime/runtime1.go
+++ b/libgo/go/runtime/runtime1.go
@@ -352,19 +352,12 @@ func parsedebugvars() {
 	// defaults
 	debug.cgocheck = 1
 
-	// Unfortunately, because gccgo uses conservative stack scanning,
-	// we can not enable invalid pointer checking. It is possible for
-	// memory block M1 to point to M2, and for both to be dead.
-	// We release M2, causing the entire span to be released.
-	// Before we release M1, a stack pointer appears that point into it.
-	// This stack pointer is presumably dead, but causes M1 to be marked.
-	// We scan M1 and see the pointer to M2 on a released span.
-	// At that point, if debug.invalidptr is set, we crash.
-	// This is not a problem, assuming that M1 really is dead and
-	// the pointer we discovered to it will not be used.
-	if usestackmaps {
-		debug.invalidptr = 1
-	}
+	// Gccgo uses conservative stack scanning, so we cannot check
+	// invalid pointers on stack. But we can still enable invalid
+	// pointer check on heap scanning. When scanning the heap, we
+	// ensure that we only trace allocated heap objects, which should
+	// not contain invalid pointers.
+	debug.invalidptr = 1
 
 	for p := gogetenv("GODEBUG"); p != ""; {
 		field := ""
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 4f823e0..e4dfbdf 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -746,6 +746,12 @@ func extendRandom(r []byte, n int) {
 
 // A _defer holds an entry on the list of deferred calls.
 // If you add a field here, add code to clear it in freedefer.
+// This struct must match the code in Defer_statement::defer_struct_type
+// in the compiler.
+// Some defers will be allocated on the stack and some on the heap.
+// All defers are logically part of the stack, so write barriers to
+// initialize them are not required. All defers must be manually scanned,
+// and for heap defers, marked.
 type _defer struct {
 	// The next entry in the stack.
 	link *_defer
@@ -781,6 +787,9 @@ type _defer struct {
 	// function function will be somewhere in libffi, so __retaddr
 	// is not useful.
 	makefunccanrecover bool
+
+	// Whether the _defer is heap allocated.
+	heap bool
 }
 
 // panics
diff --git a/libgo/go/runtime/stack_test.go b/libgo/go/runtime/stack_test.go
new file mode 100644
index 0000000..b696253
--- /dev/null
+++ b/libgo/go/runtime/stack_test.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import "testing"
+
+func TestDeferHeapAndStack(t *testing.T) {
+	P := 4     // processors
+	N := 10000 // iterations
+	D := 200   // stack depth
+
+	if testing.Short() {
+		P /= 2
+		N /= 10
+		D /= 10
+	}
+	c := make(chan bool)
+	for p := 0; p < P; p++ {
+		go func() {
+			for i := 0; i < N; i++ {
+				if deferHeapAndStack(D) != 2*D {
+					panic("bad result")
+				}
+			}
+			c <- true
+		}()
+	}
+	for p := 0; p < P; p++ {
+		<-c
+	}
+}
+
+// deferHeapAndStack(n) computes 2*n
+func deferHeapAndStack(n int) (r int) {
+	if n == 0 {
+		return 0
+	}
+	if n%2 == 0 {
+		// heap-allocated defers
+		for i := 0; i < 2; i++ {
+			defer func() {
+				r++
+			}()
+		}
+	} else {
+		// stack-allocated defers
+		defer func() {
+			r++
+		}()
+		defer func() {
+			r++
+		}()
+	}
+	r = deferHeapAndStack(n - 1)
+	escapeMe(new([1024]byte)) // force some GCs
+	return
+}
+
+// Pass a value to escapeMe to force it to escape.
+var escapeMe = func(x interface{}) {}
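For context, the new TestDeferHeapAndStack above exercises both shapes the runtime now distinguishes: defers issued inside a loop, which take the heap path through newdefer, and straight-line defers, which are eligible for a stack-allocated record via deferprocStack. The benchmark sketch below, which is illustrative and not part of the commit, contrasts the same two shapes from ordinary user code; the file, package, and function names are invented, and whether a particular defer actually receives a stack record remains a compiler decision.

// defer_bench_test.go (hypothetical file, not from this commit)
package defers

import "testing"

// deferInLoop issues its defer from inside a loop, so the runtime must
// heap-allocate the defer record (the newdefer path).
func deferInLoop() (n int) {
	for i := 0; i < 1; i++ {
		defer func() { n++ }()
	}
	return
}

// deferStraight issues a single straight-line defer, the shape that is
// eligible for a stack-allocated record (the deferprocStack path),
// assuming the compiler chooses to apply that lowering.
func deferStraight() (n int) {
	defer func() { n++ }()
	return
}

func BenchmarkDeferInLoop(b *testing.B) {
	for i := 0; i < b.N; i++ {
		if deferInLoop() != 1 {
			b.Fatal("unexpected result")
		}
	}
}

func BenchmarkDeferStraight(b *testing.B) {
	for i := 0; i < b.N; i++ {
		if deferStraight() != 1 {
			b.Fatal("unexpected result")
		}
	}
}

Saved in its own package directory, this runs with go test -bench=Defer; both functions return the same value, so any difference between the two benchmarks reflects only how the defer record is allocated.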
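A related usage note on the runtime1.go hunk: debug.invalidptr now defaults to 1 under gccgo rather than only when stack maps are in use, so the collector's check for invalid values found in pointer-typed heap locations is on by default. The GODEBUG parsing loop that follows the new default still applies, so, as with the gc runtime, setting GODEBUG=invalidptr=0 in the environment remains the temporary escape hatch for diagnosing code that stores non-pointer data in pointer-typed memory.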