Diffstat (limited to 'libgo/go')
-rw-r--r--   libgo/go/runtime/mgcmark.go    |  5
-rw-r--r--   libgo/go/runtime/panic.go      | 38
-rw-r--r--   libgo/go/runtime/runtime2.go   |  9
-rw-r--r--   libgo/go/runtime/stack_test.go | 62
4 files changed, 114 insertions, 0 deletions
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index 1b8a7a3..2463a48 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -657,6 +657,11 @@ func scanstack(gp *g, gcw *gcWork) {
 		scanstackblock(uintptr(unsafe.Pointer(&gp.context)), unsafe.Sizeof(gp.context), gcw)
 	}
 
+	// Note: in the gc runtime scanstack also scans defer records.
+	// This is necessary as it uses stack objects (a.k.a. stack tracing).
+	// We don't (yet) do stack objects, and regular stack/heap scan
+	// will take care of defer records just fine.
+
 	gp.gcscanvalid = true
 }
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 264ad38..88c0a4d 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -13,6 +13,7 @@ import (
 // themselves, so that the compiler will export them.
 //
 //go:linkname deferproc runtime.deferproc
+//go:linkname deferprocStack runtime.deferprocStack
 //go:linkname deferreturn runtime.deferreturn
 //go:linkname setdeferretaddr runtime.setdeferretaddr
 //go:linkname checkdefer runtime.checkdefer
@@ -124,6 +125,38 @@ func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) {
 	d.makefunccanrecover = false
 }
 
+// deferprocStack queues a new deferred function with a defer record on the stack.
+// The defer record, d, does not need to be initialized.
+// Other arguments are the same as in deferproc.
+//go:nosplit
+func deferprocStack(d *_defer, frame *bool, pfn uintptr, arg unsafe.Pointer) {
+	gp := getg()
+	if gp.m.curg != gp {
+		// go code on the system stack can't defer
+		throw("defer on system stack")
+	}
+	d.pfn = pfn
+	d.retaddr = 0
+	d.makefunccanrecover = false
+	d.heap = false
+	// The lines below implement:
+	//   d.frame = frame
+	//   d.arg = arg
+	//   d._panic = nil
+	//   d.panicStack = gp._panic
+	//   d.link = gp._defer
+	// But without write barriers. They are writes to the stack so they
+	// don't need a write barrier, and furthermore are to uninitialized
+	// memory, so they must not use a write barrier.
+	*(*uintptr)(unsafe.Pointer(&d.frame)) = uintptr(unsafe.Pointer(frame))
+	*(*uintptr)(unsafe.Pointer(&d.arg)) = uintptr(unsafe.Pointer(arg))
+	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
+	*(*uintptr)(unsafe.Pointer(&d.panicStack)) = uintptr(unsafe.Pointer(gp._panic))
+	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
+
+	gp._defer = d
+}
+
 // Allocate a Defer, usually using per-P pool.
 // Each defer must be released with freedefer.
 func newdefer() *_defer {
@@ -155,11 +188,13 @@ func newdefer() *_defer {
 			// Duplicate the tail below so if there's a
 			// crash in checkPut we can tell if d was just
 			// allocated or came from the pool.
+			d.heap = true
 			d.link = gp._defer
 			gp._defer = d
 			return d
 		}
 	}
+	d.heap = true
 	d.link = gp._defer
 	gp._defer = d
 	return d
@@ -179,6 +214,9 @@ func freedefer(d *_defer) {
 	if d.pfn != 0 {
 		freedeferfn()
 	}
+	if !d.heap {
+		return
+	}
 	pp := getg().m.p.ptr()
 	if len(pp.deferpool) == cap(pp.deferpool) {
 		// Transfer half of local cache to the central cache.
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index 4f823e0..e4dfbdf 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -746,6 +746,12 @@ func extendRandom(r []byte, n int) {
 
 // A _defer holds an entry on the list of deferred calls.
 // If you add a field here, add code to clear it in freedefer.
+// This struct must match the code in Defer_statement::defer_struct_type
+// in the compiler.
+// Some defers will be allocated on the stack and some on the heap.
+// All defers are logically part of the stack, so write barriers to
+// initialize them are not required. All defers must be manually scanned,
+// and for heap defers, marked.
 type _defer struct {
 	// The next entry in the stack.
 	link *_defer
@@ -781,6 +787,9 @@ type _defer struct {
 	// function function will be somewhere in libffi, so __retaddr
 	// is not useful.
 	makefunccanrecover bool
+
+	// Whether the _defer is heap allocated.
+	heap bool
 }
 
 // panics
diff --git a/libgo/go/runtime/stack_test.go b/libgo/go/runtime/stack_test.go
new file mode 100644
index 0000000..b696253
--- /dev/null
+++ b/libgo/go/runtime/stack_test.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import "testing"
+
+func TestDeferHeapAndStack(t *testing.T) {
+	P := 4     // processors
+	N := 10000 // iterations
+	D := 200   // stack depth
+
+	if testing.Short() {
+		P /= 2
+		N /= 10
+		D /= 10
+	}
+	c := make(chan bool)
+	for p := 0; p < P; p++ {
+		go func() {
+			for i := 0; i < N; i++ {
+				if deferHeapAndStack(D) != 2*D {
+					panic("bad result")
+				}
+			}
+			c <- true
+		}()
+	}
+	for p := 0; p < P; p++ {
+		<-c
+	}
+}
+
+// deferHeapAndStack(n) computes 2*n
+func deferHeapAndStack(n int) (r int) {
+	if n == 0 {
+		return 0
+	}
+	if n%2 == 0 {
+		// heap-allocated defers
+		for i := 0; i < 2; i++ {
+			defer func() {
+				r++
+			}()
+		}
+	} else {
+		// stack-allocated defers
+		defer func() {
+			r++
+		}()
+		defer func() {
+			r++
+		}()
+	}
+	r = deferHeapAndStack(n - 1)
+	escapeMe(new([1024]byte)) // force some GCs
+	return
+}
+
+// Pass a value to escapeMe to force it to escape.
+var escapeMe = func(x interface{}) {}
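For context, here is a minimal sketch, not part of this commit, of the two shapes exercised by deferHeapAndStack above. Straight-line defers are the case a compiler can lower to deferprocStack, with the _defer record placed in the caller's frame; defers inside a loop still go through deferproc and the per-P pool, so their records carry d.heap = true for freedefer to recycle. The function names below (stackLikeDefers, heapLikeDefers) are illustrative only, and exactly which defers a given compiler stack-allocates is a compiler decision assumed here, not something shown in this diff.

package main

import "fmt"

// stackLikeDefers uses a fixed number of straight-line defer statements,
// the pattern a compiler supporting stack-allocated defer records can
// lower to deferprocStack.
func stackLikeDefers() (n int) {
	defer func() { n++ }()
	defer func() { n++ }()
	return // the deferred closures run here and bump n to 2
}

// heapLikeDefers defers inside a loop, so the number of records is not
// known statically; each iteration queues a record via deferproc,
// allocated from the heap (per-P pool).
func heapLikeDefers() (n int) {
	for i := 0; i < 2; i++ {
		defer func() { n++ }()
	}
	return
}

func main() {
	fmt.Println(stackLikeDefers(), heapLikeDefers()) // prints: 2 2
}

Both functions return 2 regardless of where the records live; the test in stack_test.go relies on exactly this equivalence while forcing GCs, so that a stack-allocated record that was wrongly treated as heap-allocated (or vice versa) would show up as a bad result or a crash.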