author | Ian Lance Taylor <iant@golang.org> | 2020-04-06 14:04:45 -0700 |
---|---|---|
committer | Ian Lance Taylor <iant@golang.org> | 2020-04-06 16:37:24 -0700 |
commit | 52fa80f853c0b0f623ea9e4c7198e324ce44ff30 (patch) | |
tree | e2695726e95b7bd125d52b7bdd315cb0028854fa /libgo/go/runtime | |
parent | 749bd22ddc50b5112e5ed506ffef7249bf8e6fb3 (diff) | |
libgo: update to almost the 1.14.2 release
Update to edea4a79e8d7dea2456b688f492c8af33d381dc2, which should be
approximately the 1.14.2 release.
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/227377
Diffstat (limited to 'libgo/go/runtime')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | libgo/go/runtime/alg.go | 12 |
| -rw-r--r-- | libgo/go/runtime/checkptr.go | 4 |
| -rw-r--r-- | libgo/go/runtime/checkptr_test.go | 9 |
| -rw-r--r-- | libgo/go/runtime/defer_test.go | 120 |
| -rw-r--r-- | libgo/go/runtime/export_test.go | 25 |
| -rw-r--r-- | libgo/go/runtime/hash_test.go | 49 |
| -rw-r--r-- | libgo/go/runtime/mgc.go | 48 |
| -rw-r--r-- | libgo/go/runtime/mkpreempt.go | 43 |
| -rw-r--r-- | libgo/go/runtime/panic.go | 15 |
| -rw-r--r-- | libgo/go/runtime/pprof/map.go | 3 |
| -rw-r--r-- | libgo/go/runtime/pprof/pprof_test.go | 59 |
| -rw-r--r-- | libgo/go/runtime/pprof/proto.go | 45 |
| -rw-r--r-- | libgo/go/runtime/pprof/proto_test.go | 13 |
| -rw-r--r-- | libgo/go/runtime/pprof/runtime.go | 3 |
| -rw-r--r-- | libgo/go/runtime/runtime2.go | 4 |
| -rw-r--r-- | libgo/go/runtime/signal_unix.go | 1 |
| -rw-r--r-- | libgo/go/runtime/symtab.go | 31 |
| -rw-r--r-- | libgo/go/runtime/testdata/testprog/checkptr.go | 11 |
| -rw-r--r-- | libgo/go/runtime/time.go | 256 |
19 files changed, 522 insertions, 229 deletions
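The runtime/checkptr.go hunk below relaxes the alignment check so that a misaligned pointer conversion is fatal only when the pointed-to type itself contains pointers (issue 37298). A hedged, standalone sketch of the observable difference (not part of the patch), assuming a build with checkptr instrumentation enabled, for example via `-gcflags=all=-d=checkptr` or implicitly with the race detector:

```go
// Sketch only: mirrors the CheckPtrAlignmentNoPtr / CheckPtrAlignmentPtr
// test programs added in this patch. With checkptr enabled, the pointer-free
// conversion is now accepted, while the pointer-containing one still dies
// with "fatal error: checkptr: unsafe pointer conversion".
package main

import (
	"fmt"
	"unsafe"
)

var sinkNoPtr *int64
var sinkPtr **int64

func main() {
	var x [2]int64
	p := unsafe.Pointer(&x[0])

	// Allowed after this change: int64 holds no pointers, so a misaligned
	// *int64 cannot mislead the garbage collector.
	sinkNoPtr = (*int64)(unsafe.Pointer(uintptr(p) + 1))
	fmt.Println("pointer-free conversion accepted")

	// Still rejected: **int64 points at pointer data, so misalignment is
	// treated as corruption.
	sinkPtr = (**int64)(unsafe.Pointer(uintptr(p) + 1))
	fmt.Println("not reached when checkptr is enabled")
}
```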
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go index 101402c..95f02aa 100644 --- a/libgo/go/runtime/alg.go +++ b/libgo/go/runtime/alg.go @@ -176,9 +176,19 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr { // is slower but more general and is used for hashing interface types // (called from interhash or nilinterhash, above) or for hashing in // maps generated by reflect.MapOf (reflect_typehash, below). +// Note: this function must match the compiler generated +// functions exactly. See issue 37716. func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr { if t.tflag&tflagRegularMemory != 0 { - return memhash(p, h, t.size) + // Handle ptr sizes specially, see issue 37086. + switch t.size { + case 4: + return memhash32(p, h) + case 8: + return memhash64(p, h) + default: + return memhash(p, h, t.size) + } } switch t.kind & kindMask { case kindFloat32: diff --git a/libgo/go/runtime/checkptr.go b/libgo/go/runtime/checkptr.go index 974f0a0..d5f116c 100644 --- a/libgo/go/runtime/checkptr.go +++ b/libgo/go/runtime/checkptr.go @@ -10,8 +10,10 @@ import "unsafe" func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) { // Check that (*[n]elem)(p) is appropriately aligned. + // Note that we allow unaligned pointers if the types they point to contain + // no pointers themselves. See issue 37298. // TODO(mdempsky): What about fieldAlign? - if uintptr(p)&(uintptr(elem.align)-1) != 0 { + if elem.ptrdata != 0 && uintptr(p)&(uintptr(elem.align)-1) != 0 { throw("checkptr: unsafe pointer conversion") } diff --git a/libgo/go/runtime/checkptr_test.go b/libgo/go/runtime/checkptr_test.go index ab3058f..0ca7b20 100644 --- a/libgo/go/runtime/checkptr_test.go +++ b/libgo/go/runtime/checkptr_test.go @@ -28,7 +28,8 @@ func TestCheckPtr(t *testing.T) { cmd string want string }{ - {"CheckPtrAlignment", "fatal error: checkptr: unsafe pointer conversion\n"}, + {"CheckPtrAlignmentPtr", "fatal error: checkptr: unsafe pointer conversion\n"}, + {"CheckPtrAlignmentNoPtr", ""}, {"CheckPtrArithmetic", "fatal error: checkptr: unsafe pointer arithmetic\n"}, {"CheckPtrSize", "fatal error: checkptr: unsafe pointer conversion\n"}, {"CheckPtrSmall", "fatal error: checkptr: unsafe pointer arithmetic\n"}, @@ -42,6 +43,12 @@ func TestCheckPtr(t *testing.T) { if err != nil { t.Log(err) } + if tc.want == "" { + if len(got) > 0 { + t.Errorf("output:\n%s\nwant no output", got) + } + return + } if !strings.HasPrefix(string(got), tc.want) { t.Errorf("output:\n%s\n\nwant output starting with: %s", got, tc.want) } diff --git a/libgo/go/runtime/defer_test.go b/libgo/go/runtime/defer_test.go index 3d8f812..11436a1 100644 --- a/libgo/go/runtime/defer_test.go +++ b/libgo/go/runtime/defer_test.go @@ -6,6 +6,7 @@ package runtime_test import ( "fmt" + "os" "reflect" "runtime" "testing" @@ -281,3 +282,122 @@ func TestDeferForFuncWithNoExit(t *testing.T) { for { } } + +// Test case approximating issue #37664, where a recursive function (interpreter) +// may do repeated recovers/re-panics until it reaches the frame where the panic +// can actually be handled. The recurseFnPanicRec() function is testing that there +// are no stale defer structs on the defer chain after the interpreter() sequence, +// by writing a bunch of 0xffffffffs into several recursive stack frames, and then +// doing a single panic-recover which would invoke any such stale defer structs. 
+func TestDeferWithRepeatedRepanics(t *testing.T) { + interpreter(0, 6, 2) + recurseFnPanicRec(0, 10) + interpreter(0, 5, 1) + recurseFnPanicRec(0, 10) + interpreter(0, 6, 3) + recurseFnPanicRec(0, 10) +} + +func interpreter(level int, maxlevel int, rec int) { + defer func() { + e := recover() + if e == nil { + return + } + if level != e.(int) { + //fmt.Fprintln(os.Stderr, "re-panicing, level", level) + panic(e) + } + //fmt.Fprintln(os.Stderr, "Recovered, level", level) + }() + if level+1 < maxlevel { + interpreter(level+1, maxlevel, rec) + } else { + //fmt.Fprintln(os.Stderr, "Initiating panic") + panic(rec) + } +} + +func recurseFnPanicRec(level int, maxlevel int) { + defer func() { + recover() + }() + recurseFn(level, maxlevel) +} + +func recurseFn(level int, maxlevel int) { + a := [40]uint32{0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff} + if level+1 < maxlevel { + // Need this print statement to keep a around. '_ = a[4]' doesn't do it. + fmt.Fprintln(os.Stderr, "recurseFn", level, a[4]) + recurseFn(level+1, maxlevel) + } else { + panic("recurseFn panic") + } +} + +// Try to reproduce issue #37688, where a pointer to an open-coded defer struct is +// mistakenly held, and that struct keeps a pointer to a stack-allocated defer +// struct, and that stack-allocated struct gets overwritten or the stack gets +// moved, so a memory error happens on GC. +func TestIssue37688(t *testing.T) { + for j := 0; j < 10; j++ { + g2() + g3() + } +} + +type foo struct { +} + +func (f *foo) method1() { + fmt.Fprintln(os.Stderr, "method1") +} + +func (f *foo) method2() { + fmt.Fprintln(os.Stderr, "method2") +} + +func g2() { + var a foo + ap := &a + // The loop forces this defer to be heap-allocated and the remaining two + // to be stack-allocated. + for i := 0; i < 1; i++ { + defer ap.method1() + } + defer ap.method2() + defer ap.method1() + ff1(ap, 1, 2, 3, 4, 5, 6, 7, 8, 9) + // Try to get the stack to be be moved by growing it too large, so + // existing stack-allocated defer becomes invalid. + rec1(2000) +} + +func g3() { + // Mix up the stack layout by adding in an extra function frame + g2() +} + +func ff1(ap *foo, a, b, c, d, e, f, g, h, i int) { + defer ap.method1() + + // Make a defer that has a very large set of args, hence big size for the + // defer record for the open-coded frame (which means it won't use the + // defer pool) + defer func(ap *foo, a, b, c, d, e, f, g, h, i int) { + if v := recover(); v != nil { + fmt.Fprintln(os.Stderr, "did recover") + } + fmt.Fprintln(os.Stderr, "debug", ap, a, b, c, d, e, f, g, h) + }(ap, a, b, c, d, e, f, g, h, i) + panic("ff1 panic") +} + +func rec1(max int) { + if max > 0 { + rec1(max - 1) + } else { + fmt.Fprintln(os.Stderr, "finished recursion", max) + } +} diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go index b60c19b..6595faf 100644 --- a/libgo/go/runtime/export_test.go +++ b/libgo/go/runtime/export_test.go @@ -945,4 +945,29 @@ func SemNwait(addr *uint32) uint32 { return atomic.Load(&root.nwait) } +// MapHashCheck computes the hash of the key k for the map m, twice. 
+// Method 1 uses the built-in hasher for the map. +// Method 2 uses the typehash function (the one used by reflect). +// Returns the two hash values, which should always be equal. +func MapHashCheck(m interface{}, k interface{}) (uintptr, uintptr) { + // Unpack m. + mt := (*maptype)(unsafe.Pointer(efaceOf(&m)._type)) + mh := (*hmap)(efaceOf(&m).data) + + // Unpack k. + kt := efaceOf(&k)._type + var p unsafe.Pointer + if isDirectIface(kt) { + q := efaceOf(&k).data + p = unsafe.Pointer(&q) + } else { + p = efaceOf(&k).data + } + + // Compute the hash functions. + x := mt.hasher(noescape(p), uintptr(mh.hash0)) + y := typehash(kt, noescape(p), uintptr(mh.hash0)) + return x, y +} + var Pusestackmaps = &usestackmaps diff --git a/libgo/go/runtime/hash_test.go b/libgo/go/runtime/hash_test.go index d57be4c..522b7fe 100644 --- a/libgo/go/runtime/hash_test.go +++ b/libgo/go/runtime/hash_test.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/rand" + "reflect" . "runtime" "strings" "testing" @@ -48,6 +49,54 @@ func TestMemHash64Equality(t *testing.T) { } } +func TestCompilerVsRuntimeHash(t *testing.T) { + // Test to make sure the compiler's hash function and the runtime's hash function agree. + // See issue 37716. + for _, m := range []interface{}{ + map[bool]int{}, + map[int8]int{}, + map[uint8]int{}, + map[int16]int{}, + map[uint16]int{}, + map[int32]int{}, + map[uint32]int{}, + map[int64]int{}, + map[uint64]int{}, + map[int]int{}, + map[uint]int{}, + map[uintptr]int{}, + map[*byte]int{}, + map[chan int]int{}, + map[unsafe.Pointer]int{}, + map[float32]int{}, + map[float64]int{}, + map[complex64]int{}, + map[complex128]int{}, + map[string]int{}, + //map[interface{}]int{}, + //map[interface{F()}]int{}, + map[[8]uint64]int{}, + map[[8]string]int{}, + map[struct{ a, b, c, d int32 }]int{}, // Note: tests AMEM128 + map[struct{ a, b, _, d int32 }]int{}, + map[struct { + a, b int32 + c float32 + d, e [8]byte + }]int{}, + map[struct { + a int16 + b int64 + }]int{}, + } { + k := reflect.New(reflect.TypeOf(m).Key()).Elem().Interface() // the zero key + x, y := MapHashCheck(m, k) + if x != y { + t.Errorf("hashes did not match (%x vs %x) for map %T", x, y, m) + } + } +} + // Smhasher is a torture test for hash functions. // https://code.google.com/p/smhasher/ // This code is a port of some of the Smhasher tests to Go. diff --git a/libgo/go/runtime/mgc.go b/libgo/go/runtime/mgc.go index 8ded306..24043cf 100644 --- a/libgo/go/runtime/mgc.go +++ b/libgo/go/runtime/mgc.go @@ -771,32 +771,40 @@ func gcSetTriggerRatio(triggerRatio float64) { goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100 } - // If we let triggerRatio go too low, then if the application - // is allocating very rapidly we might end up in a situation - // where we're allocating black during a nearly always-on GC. - // The result of this is a growing heap and ultimately an - // increase in RSS. By capping us at a point >0, we're essentially - // saying that we're OK using more CPU during the GC to prevent - // this growth in RSS. - // - // The current constant was chosen empirically: given a sufficiently - // fast/scalable allocator with 48 Ps that could drive the trigger ratio - // to <0.05, this constant causes applications to retain the same peak - // RSS compared to not having this allocator. - const minTriggerRatio = 0.6 - // Set the trigger ratio, capped to reasonable bounds. - if triggerRatio < minTriggerRatio { - // This can happen if the mutator is allocating very - // quickly or the GC is scanning very slowly. 
- triggerRatio = minTriggerRatio - } else if gcpercent >= 0 { + if gcpercent >= 0 { + scalingFactor := float64(gcpercent) / 100 // Ensure there's always a little margin so that the // mutator assist ratio isn't infinity. - maxTriggerRatio := 0.95 * float64(gcpercent) / 100 + maxTriggerRatio := 0.95 * scalingFactor if triggerRatio > maxTriggerRatio { triggerRatio = maxTriggerRatio } + + // If we let triggerRatio go too low, then if the application + // is allocating very rapidly we might end up in a situation + // where we're allocating black during a nearly always-on GC. + // The result of this is a growing heap and ultimately an + // increase in RSS. By capping us at a point >0, we're essentially + // saying that we're OK using more CPU during the GC to prevent + // this growth in RSS. + // + // The current constant was chosen empirically: given a sufficiently + // fast/scalable allocator with 48 Ps that could drive the trigger ratio + // to <0.05, this constant causes applications to retain the same peak + // RSS compared to not having this allocator. + minTriggerRatio := 0.6 * scalingFactor + if triggerRatio < minTriggerRatio { + triggerRatio = minTriggerRatio + } + } else if triggerRatio < 0 { + // gcpercent < 0, so just make sure we're not getting a negative + // triggerRatio. This case isn't expected to happen in practice, + // and doesn't really matter because if gcpercent < 0 then we won't + // ever consume triggerRatio further on in this function, but let's + // just be defensive here; the triggerRatio being negative is almost + // certainly undesirable. + triggerRatio = 0 } memstats.triggerRatio = triggerRatio diff --git a/libgo/go/runtime/mkpreempt.go b/libgo/go/runtime/mkpreempt.go index 31b6f5c..35ed428 100644 --- a/libgo/go/runtime/mkpreempt.go +++ b/libgo/go/runtime/mkpreempt.go @@ -244,23 +244,26 @@ func genAMD64() { // TODO: MXCSR register? + p("PUSHQ BP") + p("MOVQ SP, BP") + p("// Save flags before clobbering them") + p("PUSHFQ") + p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP") + p("ADJSP $%d", l.stack) + p("// But vet doesn't know ADJSP, so suppress vet stack checking") + p("NOP SP") + // Apparently, the signal handling code path in darwin kernel leaves // the upper bits of Y registers in a dirty state, which causes // many SSE operations (128-bit and narrower) become much slower. // Clear the upper bits to get to a clean state. See issue #37174. // It is safe here as Go code don't use the upper bits of Y registers. 
p("#ifdef GOOS_darwin") + p("CMPB internal∕cpu·X86+const_offsetX86HasAVX(SB), $0") + p("JE 2(PC)") p("VZEROUPPER") p("#endif") - p("PUSHQ BP") - p("MOVQ SP, BP") - p("// Save flags before clobbering them") - p("PUSHFQ") - p("// obj doesn't understand ADD/SUB on SP, but does understand ADJSP") - p("ADJSP $%d", l.stack) - p("// But vet doesn't know ADJSP, so suppress vet stack checking") - p("NOP SP") l.save() p("CALL ·asyncPreempt2(SB)") l.restore() @@ -379,6 +382,7 @@ func genMIPS(_64bit bool) { sub := "SUB" r28 := "R28" regsize := 4 + softfloat := "GOMIPS_softfloat" if _64bit { mov = "MOVV" movf = "MOVD" @@ -386,6 +390,7 @@ func genMIPS(_64bit bool) { sub = "SUBV" r28 = "RSB" regsize = 8 + softfloat = "GOMIPS64_softfloat" } // Add integer registers R1-R22, R24-R25, R28 @@ -408,28 +413,36 @@ func genMIPS(_64bit bool) { mov+" LO, R1\n"+mov+" R1, %d(R29)", mov+" %d(R29), R1\n"+mov+" R1, LO", regsize) + // Add floating point control/status register FCR31 (FCR0-FCR30 are irrelevant) - l.addSpecial( + var lfp = layout{sp: "R29", stack: l.stack} + lfp.addSpecial( mov+" FCR31, R1\n"+mov+" R1, %d(R29)", mov+" %d(R29), R1\n"+mov+" R1, FCR31", regsize) // Add floating point registers F0-F31. for i := 0; i <= 31; i++ { reg := fmt.Sprintf("F%d", i) - l.add(movf, reg, regsize) + lfp.add(movf, reg, regsize) } // allocate frame, save PC of interrupted instruction (in LR) - p(mov+" R31, -%d(R29)", l.stack) - p(sub+" $%d, R29", l.stack) + p(mov+" R31, -%d(R29)", lfp.stack) + p(sub+" $%d, R29", lfp.stack) l.save() + p("#ifndef %s", softfloat) + lfp.save() + p("#endif") p("CALL ·asyncPreempt2(SB)") + p("#ifndef %s", softfloat) + lfp.restore() + p("#endif") l.restore() - p(mov+" %d(R29), R31", l.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it - p(mov + " (R29), R23") // load PC to REGTMP - p(add+" $%d, R29", l.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) + p(mov+" %d(R29), R31", lfp.stack) // sigctxt.pushCall has pushed LR (at interrupt) on stack, restore it + p(mov + " (R29), R23") // load PC to REGTMP + p(add+" $%d, R29", lfp.stack+regsize) // pop frame (including the space pushed by sigctxt.pushCall) p("JMP (R23)") } diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go index 88c598e..f6747f5 100644 --- a/libgo/go/runtime/panic.go +++ b/libgo/go/runtime/panic.go @@ -218,10 +218,13 @@ func panicmem() { // pfn is a C function pointer. // arg is a value to pass to pfn. func deferproc(frame *bool, pfn uintptr, arg unsafe.Pointer) { + gp := getg() d := newdefer() if d._panic != nil { throw("deferproc: d.panic != nil after newdefer") } + d.link = gp._defer + gp._defer = d d.frame = frame d.panicStack = getg()._panic d.pfn = pfn @@ -300,8 +303,6 @@ func newdefer() *_defer { } } d.heap = true - d.link = gp._defer - gp._defer = d return d } @@ -1175,6 +1176,12 @@ func startpanic_m() bool { } } +// throwReportQuirk, if non-nil, is called by throw after dumping the stacks. +// +// TODO(austin): Remove this after Go 1.15 when we remove the +// mlockGsignal workaround. 
+var throwReportQuirk func() + var didothers bool var deadlock mutex @@ -1221,6 +1228,10 @@ func dopanic_m(gp *g, pc, sp uintptr) bool { printDebugLog() + if throwReportQuirk != nil { + throwReportQuirk() + } + return docrash } diff --git a/libgo/go/runtime/pprof/map.go b/libgo/go/runtime/pprof/map.go index a271ad0..7c75872 100644 --- a/libgo/go/runtime/pprof/map.go +++ b/libgo/go/runtime/pprof/map.go @@ -68,7 +68,8 @@ Search: if len(m.freeStk) < len(stk) { m.freeStk = make([]uintptr, 1024) } - e.stk = m.freeStk[:len(stk)] + // Limit cap to prevent append from clobbering freeStk. + e.stk = m.freeStk[:len(stk):len(stk)] m.freeStk = m.freeStk[len(stk):] for j := range stk { diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go index bba9ee3..239466f 100644 --- a/libgo/go/runtime/pprof/pprof_test.go +++ b/libgo/go/runtime/pprof/pprof_test.go @@ -1192,16 +1192,37 @@ func TestTryAdd(t *testing.T) { {Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}}, }, }, { - name: "recursive_inlined_funcs", + name: "bug38096", + input: []uint64{ + 3, 0, 500, // hz = 500. Must match the period. + // count (data[2]) == 0 && len(stk) == 1 is an overflow + // entry. The "stk" entry is actually the count. + 4, 0, 0, 4242, + }, + wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}}, + wantSamples: []*profile.Sample{ + {Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}}, + }, + }, { + // If a function is called recursively then it must not be + // inlined in the caller. + // + // N.B. We're generating an impossible profile here, with a + // recursive inlineCallee call. This is simulating a non-Go + // function that looks like an inlined Go function other than + // its recursive property. See pcDeck.tryAdd. + name: "recursive_func_is_not_inlined", input: []uint64{ 3, 0, 500, // hz = 500. Must match the period. 5, 0, 30, inlinedCalleePtr, inlinedCalleePtr, 4, 0, 40, inlinedCalleePtr, }, - wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}}, + // inlinedCaller shows up here because + // runtime_expandFinalInlineFrame adds it to the stack frame. + wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}, {"runtime/pprof.inlinedCaller"}}, wantSamples: []*profile.Sample{ - {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}}, - {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}}, + {Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}}, + {Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}}, }, }, { name: "truncated_stack_trace_later", @@ -1222,12 +1243,36 @@ func TestTryAdd(t *testing.T) { 4, 0, 70, inlinedCalleePtr, 5, 0, 80, inlinedCalleePtr, inlinedCallerPtr, }, - wantLocs: [][]string{ // the inline info is screwed up, but better than a crash. - {"runtime/pprof.inlinedCallee"}, + wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}}, + wantSamples: []*profile.Sample{ + {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, + {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}}, + }, + }, { + // We can recover the inlined caller from a truncated stack. + name: "truncated_stack_trace_only", + input: []uint64{ + 3, 0, 500, // hz = 500. Must match the period. 
+ 4, 0, 70, inlinedCalleePtr, + }, + wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}}, + wantSamples: []*profile.Sample{ + {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, + }, + }, { + // The same location is used for duplicated stacks. + name: "truncated_stack_trace_twice", + input: []uint64{ + 3, 0, 500, // hz = 500. Must match the period. + 4, 0, 70, inlinedCalleePtr, + 5, 0, 80, inlinedCallerPtr, inlinedCalleePtr, + }, + wantLocs: [][]string{ + {"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, {"runtime/pprof.inlinedCaller"}}, wantSamples: []*profile.Sample{ {Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}}, - {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}}, + {Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}}, }, }} diff --git a/libgo/go/runtime/pprof/proto.go b/libgo/go/runtime/pprof/proto.go index ba5db85..15fa44b 100644 --- a/libgo/go/runtime/pprof/proto.go +++ b/libgo/go/runtime/pprof/proto.go @@ -335,7 +335,10 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error // overflow record count = uint64(stk[0]) stk = []uint64{ - uint64(funcPC(lostProfileEvent)), + // gentraceback guarantees that PCs in the + // stack can be unconditionally decremented and + // still be valid, so we must do the same. + uint64(funcPC(lostProfileEvent) + 1), } } b.m.lookup(stk, tag).count += int64(count) @@ -397,6 +400,10 @@ func (b *profileBuilder) build() { // It may emit to b.pb, so there must be no message encoding in progress. func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLocs []uint64) { b.deck.reset() + + // The last frame might be truncated. Recover lost inline frames. + stk = runtime_expandFinalInlineFrame(stk) + for len(stk) > 0 { addr := stk[0] if l, ok := b.locs[addr]; ok { @@ -408,22 +415,12 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo // then, record the cached location. locs = append(locs, l.id) - // The stk may be truncated due to the stack depth limit - // (e.g. See maxStack and maxCPUProfStack in runtime) or - // bugs in runtime. Avoid the crash in either case. - // TODO(hyangah): The correct fix may require using the exact - // pcs as the key for b.locs cache management instead of just - // relying on the very first pc. We are late in the go1.14 dev - // cycle, so this is a workaround with little code change. - if len(l.pcs) > len(stk) { - stk = nil - // TODO(hyangah): would be nice if we can enable - // debug print out on demand and report the problematic - // cached location entry and stack traces. Do we already - // have such facility to utilize (e.g. GODEBUG)? - } else { - stk = stk[len(l.pcs):] // skip the matching pcs. - } + // Skip the matching pcs. + // + // Even if stk was truncated due to the stack depth + // limit, expandFinalInlineFrame above has already + // fixed the truncation, ensuring it is long enough. + stk = stk[len(l.pcs):] continue } @@ -440,9 +437,9 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo stk = stk[1:] continue } - // add failed because this addr is not inlined with - // the existing PCs in the deck. Flush the deck and retry to - // handle this pc. + // add failed because this addr is not inlined with the + // existing PCs in the deck. Flush the deck and retry handling + // this pc. 
if id := b.emitLocation(); id > 0 { locs = append(locs, id) } @@ -476,8 +473,8 @@ func (b *profileBuilder) appendLocsForStack(locs []uint64, stk []uintptr) (newLo // the fake pcs and restore the inlined and entry functions. Inlined functions // have the following properties: // Frame's Func is nil (note: also true for non-Go functions), and -// Frame's Entry matches its entry function frame's Entry. (note: could also be true for recursive calls and non-Go functions), -// Frame's Name does not match its entry function frame's name. +// Frame's Entry matches its entry function frame's Entry (note: could also be true for recursive calls and non-Go functions), and +// Frame's Name does not match its entry function frame's name (note: inlined functions cannot be recursive). // // As reading and processing the pcs in a stack trace one by one (from leaf to the root), // we use pcDeck to temporarily hold the observed pcs and their expanded frames @@ -499,8 +496,8 @@ func (d *pcDeck) reset() { // to the deck. If it fails the caller needs to flush the deck and retry. func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symbolizeFlag) (success bool) { if existing := len(d.pcs); existing > 0 { - // 'frames' are all expanded from one 'pc' and represent all inlined functions - // so we check only the last one. + // 'd.frames' are all expanded from one 'pc' and represent all + // inlined functions so we check only the last one. newFrame := frames[0] last := d.frames[existing-1] if last.Func != nil { // the last frame can't be inlined. Flush. diff --git a/libgo/go/runtime/pprof/proto_test.go b/libgo/go/runtime/pprof/proto_test.go index 67d6b5d..81cd559 100644 --- a/libgo/go/runtime/pprof/proto_test.go +++ b/libgo/go/runtime/pprof/proto_test.go @@ -424,3 +424,16 @@ func TestFakeMapping(t *testing.T) { } } } + +// Make sure the profiler can handle an empty stack trace. +// See issue 37967. +func TestEmptyStack(t *testing.T) { + b := []uint64{ + 3, 0, 500, // hz = 500 + 3, 0, 10, // 10 samples with an empty stack trace + } + _, err := translateCPUProfile(b) + if err != nil { + t.Fatalf("translating profile: %v", err) + } +} diff --git a/libgo/go/runtime/pprof/runtime.go b/libgo/go/runtime/pprof/runtime.go index b71bbad..dd2545b 100644 --- a/libgo/go/runtime/pprof/runtime.go +++ b/libgo/go/runtime/pprof/runtime.go @@ -9,6 +9,9 @@ import ( "unsafe" ) +// runtime_expandFinalInlineFrame is defined in runtime/symtab.go. +func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr + // runtime_setProfLabel is defined in runtime/proflabel.go. func runtime_setProfLabel(labels unsafe.Pointer) diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go index f5bfc08..75b42f7 100644 --- a/libgo/go/runtime/runtime2.go +++ b/libgo/go/runtime/runtime2.go @@ -593,6 +593,10 @@ type m struct { // requested, but fails. Accessed atomically. preemptGen uint32 + // Whether this is a pending preemption signal on this M. + // Accessed atomically. + signalPending uint32 + dlogPerM mOS diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go index 150345f..1e057f6 100644 --- a/libgo/go/runtime/signal_unix.go +++ b/libgo/go/runtime/signal_unix.go @@ -343,6 +343,7 @@ func doSigPreempt(gp *g, ctxt *sigctxt, sigpc uintptr) { // Acknowledge the preemption. atomic.Xadd(&gp.m.preemptGen, 1) + atomic.Store(&gp.m.signalPending, 0) } // gccgo-specific definition. 
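The one-line runtime/pprof/map.go change above replaces a plain slice expression with a full (three-index) slice expression so that a later append cannot write into the portion of freeStk that is still waiting to be handed out. A hedged, standalone sketch of the effect, using made-up names rather than the profiler's types:

```go
// Sketch only: shows why capping capacity with a three-index slice keeps
// append from clobbering the rest of a shared free pool.
package main

import "fmt"

func main() {
	free := make([]int, 8)

	// Two-index slice: cap(a) runs to the end of free, so append can write
	// straight into free[3], the next element the pool would hand out.
	a := free[:3]
	a = append(a, 99)
	fmt.Println("without cap limit, free[3] =", free[3]) // 99: pool clobbered

	free = make([]int, 8)

	// Three-index slice: cap(b) == 3, so append must allocate a new backing
	// array and the pool stays untouched.
	b := free[:3:3]
	b = append(b, 99)
	fmt.Println("with cap limit, free[3] =", free[3]) // 0: pool intact
}
```

In the profiler this guarantees that the stack slices cached in the map never alias the space that subsequent entries will be carved from.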
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go index 5dd3894..8673457 100644 --- a/libgo/go/runtime/symtab.go +++ b/libgo/go/runtime/symtab.go @@ -4,6 +4,10 @@ package runtime +import ( + _ "unsafe" // for go:linkname +) + // Frames may be used to get function/file/line information for a // slice of PC values returned by Callers. type Frames struct { @@ -108,6 +112,33 @@ func (ci *Frames) Next() (frame Frame, more bool) { return frame, more } +//go:noescape +// pcInlineCallers is written in C. +func pcInlineCallers(pc uintptr, locbuf *location, max int32) int32 + +// runtime_expandFinalInlineFrame expands the final pc in stk to include all +// "callers" if pc is inline. +// +//go:linkname runtime_expandFinalInlineFrame runtime..z2fpprof.runtime_expandFinalInlineFrame +func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr { + if len(stk) == 0 { + return stk + } + pc := stk[len(stk)-1] + tracepc := pc - 1 + + var locbuf [_TracebackMaxFrames]location + n := pcInlineCallers(tracepc, &locbuf[0], int32(len(locbuf))) + + // Returning the same PC several times causes Frame.Next to do + // the right thing. + for i := int32(1); i < n; i++ { + stk = append(stk, pc) + } + + return stk +} + // NOTE: Func does not expose the actual unexported fields, because we return *Func // values to users, and we want to keep them from being able to overwrite the data // with (say) *f = Func{}. diff --git a/libgo/go/runtime/testdata/testprog/checkptr.go b/libgo/go/runtime/testdata/testprog/checkptr.go index 177db38..45e6fb1 100644 --- a/libgo/go/runtime/testdata/testprog/checkptr.go +++ b/libgo/go/runtime/testdata/testprog/checkptr.go @@ -7,18 +7,25 @@ package main import "unsafe" func init() { - register("CheckPtrAlignment", CheckPtrAlignment) + register("CheckPtrAlignmentNoPtr", CheckPtrAlignmentNoPtr) + register("CheckPtrAlignmentPtr", CheckPtrAlignmentPtr) register("CheckPtrArithmetic", CheckPtrArithmetic) register("CheckPtrSize", CheckPtrSize) register("CheckPtrSmall", CheckPtrSmall) } -func CheckPtrAlignment() { +func CheckPtrAlignmentNoPtr() { var x [2]int64 p := unsafe.Pointer(&x[0]) sink2 = (*int64)(unsafe.Pointer(uintptr(p) + 1)) } +func CheckPtrAlignmentPtr() { + var x [2]int64 + p := unsafe.Pointer(&x[0]) + sink2 = (**int64)(unsafe.Pointer(uintptr(p) + 1)) +} + func CheckPtrArithmetic() { var x int i := uintptr(unsafe.Pointer(&x)) diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go index d0dd3a4..27d88d4 100644 --- a/libgo/go/runtime/time.go +++ b/libgo/go/runtime/time.go @@ -73,36 +73,26 @@ type timer struct { // timerNoStatus -> timerWaiting // anything else -> panic: invalid value // deltimer: -// timerWaiting -> timerDeleted +// timerWaiting -> timerModifying -> timerDeleted // timerModifiedEarlier -> timerModifying -> timerDeleted -// timerModifiedLater -> timerDeleted +// timerModifiedLater -> timerModifying -> timerDeleted // timerNoStatus -> do nothing // timerDeleted -> do nothing // timerRemoving -> do nothing // timerRemoved -> do nothing // timerRunning -> wait until status changes // timerMoving -> wait until status changes -// timerModifying -> panic: concurrent deltimer/modtimer calls +// timerModifying -> wait until status changes // modtimer: // timerWaiting -> timerModifying -> timerModifiedXX // timerModifiedXX -> timerModifying -> timerModifiedYY -// timerNoStatus -> timerWaiting -// timerRemoved -> timerWaiting +// timerNoStatus -> timerModifying -> timerWaiting +// timerRemoved -> timerModifying -> timerWaiting +// timerDeleted -> 
timerModifying -> timerModifiedXX // timerRunning -> wait until status changes // timerMoving -> wait until status changes // timerRemoving -> wait until status changes -// timerDeleted -> panic: concurrent modtimer/deltimer calls -// timerModifying -> panic: concurrent modtimer calls -// resettimer: -// timerNoStatus -> timerWaiting -// timerRemoved -> timerWaiting -// timerDeleted -> timerModifying -> timerModifiedXX -// timerRemoving -> wait until status changes -// timerRunning -> wait until status changes -// timerWaiting -> panic: resettimer called on active timer -// timerMoving -> panic: resettimer called on active timer -// timerModifiedXX -> panic: resettimer called on active timer -// timerModifying -> panic: resettimer called on active timer +// timerModifying -> wait until status changes // cleantimers (looks in P's timer heap): // timerDeleted -> timerRemoving -> timerRemoved // timerModifiedXX -> timerMoving -> timerWaiting @@ -250,32 +240,24 @@ func addtimer(t *timer) { t.when = maxWhen } if t.status != timerNoStatus { - badTimer() + throw("addtimer called with initialized timer") } t.status = timerWaiting - addInitializedTimer(t) -} - -// addInitializedTimer adds an initialized timer to the current P. -func addInitializedTimer(t *timer) { when := t.when pp := getg().m.p.ptr() lock(&pp.timersLock) - ok := cleantimers(pp) && doaddtimer(pp, t) + cleantimers(pp) + doaddtimer(pp, t) unlock(&pp.timersLock) - if !ok { - badTimer() - } wakeNetPoller(when) } // doaddtimer adds t to the current P's heap. -// It reports whether it saw no problems due to races. // The caller must have locked the timers for pp. -func doaddtimer(pp *p, t *timer) bool { +func doaddtimer(pp *p, t *timer) { // Timers rely on the network poller, so make sure the poller // has started. if netpollInited == 0 { @@ -288,12 +270,11 @@ func doaddtimer(pp *p, t *timer) bool { t.pp.set(pp) i := len(pp.timers) pp.timers = append(pp.timers, t) - ok := siftupTimer(pp.timers, i) + siftupTimer(pp.timers, i) if t == pp.timers[0] { atomic.Store64(&pp.timer0When, uint64(t.when)) } atomic.Xadd(&pp.numTimers, 1) - return ok } // deltimer deletes the timer t. It may be on some other P, so we can't @@ -304,22 +285,42 @@ func deltimer(t *timer) bool { for { switch s := atomic.Load(&t.status); s { case timerWaiting, timerModifiedLater: - tpp := t.pp.ptr() - if atomic.Cas(&t.status, s, timerDeleted) { + // Prevent preemption while the timer is in timerModifying. + // This could lead to a self-deadlock. See #38070. + mp := acquirem() + if atomic.Cas(&t.status, s, timerModifying) { + // Must fetch t.pp before changing status, + // as cleantimers in another goroutine + // can clear t.pp of a timerDeleted timer. + tpp := t.pp.ptr() + if !atomic.Cas(&t.status, timerModifying, timerDeleted) { + badTimer() + } + releasem(mp) atomic.Xadd(&tpp.deletedTimers, 1) // Timer was not yet run. return true + } else { + releasem(mp) } case timerModifiedEarlier: - tpp := t.pp.ptr() + // Prevent preemption while the timer is in timerModifying. + // This could lead to a self-deadlock. See #38070. + mp := acquirem() if atomic.Cas(&t.status, s, timerModifying) { + // Must fetch t.pp before setting status + // to timerDeleted. + tpp := t.pp.ptr() atomic.Xadd(&tpp.adjustTimers, -1) if !atomic.Cas(&t.status, timerModifying, timerDeleted) { badTimer() } + releasem(mp) atomic.Xadd(&tpp.deletedTimers, 1) // Timer was not yet run. return true + } else { + releasem(mp) } case timerDeleted, timerRemoving, timerRemoved: // Timer was already run. 
@@ -334,7 +335,8 @@ func deltimer(t *timer) bool { return false case timerModifying: // Simultaneous calls to deltimer and modtimer. - badTimer() + // Wait for the other call to complete. + osyield() default: badTimer() } @@ -345,7 +347,7 @@ func deltimer(t *timer) bool { // We are locked on the P when this is called. // It reports whether it saw no problems due to races. // The caller must have locked the timers for pp. -func dodeltimer(pp *p, i int) bool { +func dodeltimer(pp *p, i int) { if t := pp.timers[i]; t.pp.ptr() != pp { throw("dodeltimer: wrong P") } else { @@ -357,29 +359,23 @@ func dodeltimer(pp *p, i int) bool { } pp.timers[last] = nil pp.timers = pp.timers[:last] - ok := true if i != last { // Moving to i may have moved the last timer to a new parent, // so sift up to preserve the heap guarantee. - if !siftupTimer(pp.timers, i) { - ok = false - } - if !siftdownTimer(pp.timers, i) { - ok = false - } + siftupTimer(pp.timers, i) + siftdownTimer(pp.timers, i) } if i == 0 { updateTimer0When(pp) } atomic.Xadd(&pp.numTimers, -1) - return ok } // dodeltimer0 removes timer 0 from the current P's heap. // We are locked on the P when this is called. // It reports whether it saw no problems due to races. // The caller must have locked the timers for pp. -func dodeltimer0(pp *p) bool { +func dodeltimer0(pp *p) { if t := pp.timers[0]; t.pp.ptr() != pp { throw("dodeltimer0: wrong P") } else { @@ -391,13 +387,11 @@ func dodeltimer0(pp *p) bool { } pp.timers[last] = nil pp.timers = pp.timers[:last] - ok := true if last > 0 { - ok = siftdownTimer(pp.timers, 0) + siftdownTimer(pp.timers, 0) } updateTimer0When(pp) atomic.Xadd(&pp.numTimers, -1) - return ok } // modtimer modifies an existing timer. @@ -409,30 +403,47 @@ func modtimer(t *timer, when, period int64, f func(interface{}, uintptr), arg in status := uint32(timerNoStatus) wasRemoved := false + var mp *m loop: for { switch status = atomic.Load(&t.status); status { case timerWaiting, timerModifiedEarlier, timerModifiedLater: + // Prevent preemption while the timer is in timerModifying. + // This could lead to a self-deadlock. See #38070. + mp = acquirem() if atomic.Cas(&t.status, status, timerModifying) { break loop } + releasem(mp) case timerNoStatus, timerRemoved: + // Prevent preemption while the timer is in timerModifying. + // This could lead to a self-deadlock. See #38070. + mp = acquirem() + // Timer was already run and t is no longer in a heap. // Act like addtimer. - if atomic.Cas(&t.status, status, timerWaiting) { + if atomic.Cas(&t.status, status, timerModifying) { wasRemoved = true break loop } + releasem(mp) + case timerDeleted: + // Prevent preemption while the timer is in timerModifying. + // This could lead to a self-deadlock. See #38070. + mp = acquirem() + if atomic.Cas(&t.status, status, timerModifying) { + atomic.Xadd(&t.pp.ptr().deletedTimers, -1) + break loop + } + releasem(mp) case timerRunning, timerRemoving, timerMoving: // The timer is being run or moved, by a different P. // Wait for it to complete. osyield() - case timerDeleted: - // Simultaneous calls to modtimer and deltimer. - badTimer() case timerModifying: // Multiple simultaneous calls to modtimer. - badTimer() + // Wait for the other call to complete. 
+ osyield() default: badTimer() } @@ -445,7 +456,15 @@ loop: if wasRemoved { t.when = when - addInitializedTimer(t) + pp := getg().m.p.ptr() + lock(&pp.timersLock) + doaddtimer(pp, t) + unlock(&pp.timersLock) + if !atomic.Cas(&t.status, timerModifying, timerWaiting) { + badTimer() + } + releasem(mp) + wakeNetPoller(when) } else { // The timer is in some other P's heap, so we can't change // the when field. If we did, the other P's heap would @@ -462,7 +481,6 @@ loop: // Update the adjustTimers field. Subtract one if we // are removing a timerModifiedEarlier, add one if we // are adding a timerModifiedEarlier. - tpp := t.pp.ptr() adjust := int32(0) if status == timerModifiedEarlier { adjust-- @@ -471,13 +489,14 @@ loop: adjust++ } if adjust != 0 { - atomic.Xadd(&tpp.adjustTimers, adjust) + atomic.Xadd(&t.pp.ptr().adjustTimers, adjust) } // Set the new status of the timer. if !atomic.Cas(&t.status, timerModifying, newStatus) { badTimer() } + releasem(mp) // If the new status is earlier, wake up the poller. if newStatus == timerModifiedEarlier { @@ -486,67 +505,22 @@ loop: } } -// resettimer resets an existing inactive timer to turn it into an active timer, -// with a new time for when the timer should fire. +// resettimer resets the time when a timer should fire. +// If used for an inactive timer, the timer will become active. // This should be called instead of addtimer if the timer value has been, // or may have been, used previously. func resettimer(t *timer, when int64) { - if when < 0 { - when = maxWhen - } - - for { - switch s := atomic.Load(&t.status); s { - case timerNoStatus, timerRemoved: - if atomic.Cas(&t.status, s, timerWaiting) { - t.when = when - addInitializedTimer(t) - return - } - case timerDeleted: - tpp := t.pp.ptr() - if atomic.Cas(&t.status, s, timerModifying) { - t.nextwhen = when - newStatus := uint32(timerModifiedLater) - if when < t.when { - newStatus = timerModifiedEarlier - atomic.Xadd(&t.pp.ptr().adjustTimers, 1) - } - if !atomic.Cas(&t.status, timerModifying, newStatus) { - badTimer() - } - atomic.Xadd(&tpp.deletedTimers, -1) - if newStatus == timerModifiedEarlier { - wakeNetPoller(when) - } - return - } - case timerRemoving: - // Wait for the removal to complete. - osyield() - case timerRunning: - // Even though the timer should not be active, - // we can see timerRunning if the timer function - // permits some other goroutine to call resettimer. - // Wait until the run is complete. - osyield() - case timerWaiting, timerModifying, timerModifiedEarlier, timerModifiedLater, timerMoving: - // Called resettimer on active timer. - badTimer() - default: - badTimer() - } - } + modtimer(t, when, t.period, t.f, t.arg, t.seq) } // cleantimers cleans up the head of the timer queue. This speeds up // programs that create and delete timers; leaving them in the heap // slows down addtimer. Reports whether no timer problems were found. // The caller must have locked the timers for pp. -func cleantimers(pp *p) bool { +func cleantimers(pp *p) { for { if len(pp.timers) == 0 { - return true + return } t := pp.timers[0] if t.pp.ptr() != pp { @@ -557,11 +531,9 @@ func cleantimers(pp *p) bool { if !atomic.Cas(&t.status, s, timerRemoving) { continue } - if !dodeltimer0(pp) { - return false - } + dodeltimer0(pp) if !atomic.Cas(&t.status, timerRemoving, timerRemoved) { - return false + badTimer() } atomic.Xadd(&pp.deletedTimers, -1) case timerModifiedEarlier, timerModifiedLater: @@ -571,21 +543,17 @@ func cleantimers(pp *p) bool { // Now we can change the when field. 
t.when = t.nextwhen // Move t to the right position. - if !dodeltimer0(pp) { - return false - } - if !doaddtimer(pp, t) { - return false - } + dodeltimer0(pp) + doaddtimer(pp, t) if s == timerModifiedEarlier { atomic.Xadd(&pp.adjustTimers, -1) } if !atomic.Cas(&t.status, timerMoving, timerWaiting) { - return false + badTimer() } default: // Head of timers does not need adjustment. - return true + return } } } @@ -601,9 +569,7 @@ func moveTimers(pp *p, timers []*timer) { switch s := atomic.Load(&t.status); s { case timerWaiting: t.pp = 0 - if !doaddtimer(pp, t) { - badTimer() - } + doaddtimer(pp, t) break loop case timerModifiedEarlier, timerModifiedLater: if !atomic.Cas(&t.status, s, timerMoving) { @@ -611,9 +577,7 @@ func moveTimers(pp *p, timers []*timer) { } t.when = t.nextwhen t.pp = 0 - if !doaddtimer(pp, t) { - badTimer() - } + doaddtimer(pp, t) if !atomic.Cas(&t.status, timerMoving, timerWaiting) { badTimer() } @@ -667,9 +631,7 @@ loop: switch s := atomic.Load(&t.status); s { case timerDeleted: if atomic.Cas(&t.status, s, timerRemoving) { - if !dodeltimer(pp, i) { - badTimer() - } + dodeltimer(pp, i) if !atomic.Cas(&t.status, timerRemoving, timerRemoved) { badTimer() } @@ -685,9 +647,7 @@ loop: // We don't add it back yet because the // heap manipulation could cause our // loop to skip some other timer. - if !dodeltimer(pp, i) { - badTimer() - } + dodeltimer(pp, i) moved = append(moved, t) if s == timerModifiedEarlier { if n := atomic.Xadd(&pp.adjustTimers, -1); int32(n) <= 0 { @@ -723,9 +683,7 @@ loop: // back to the timer heap. func addAdjustedTimers(pp *p, moved []*timer) { for _, t := range moved { - if !doaddtimer(pp, t) { - badTimer() - } + doaddtimer(pp, t) if !atomic.Cas(&t.status, timerMoving, timerWaiting) { badTimer() } @@ -779,9 +737,7 @@ func runtimer(pp *p, now int64) int64 { if !atomic.Cas(&t.status, s, timerRemoving) { continue } - if !dodeltimer0(pp) { - badTimer() - } + dodeltimer0(pp) if !atomic.Cas(&t.status, timerRemoving, timerRemoved) { badTimer() } @@ -795,12 +751,8 @@ func runtimer(pp *p, now int64) int64 { continue } t.when = t.nextwhen - if !dodeltimer0(pp) { - badTimer() - } - if !doaddtimer(pp, t) { - badTimer() - } + dodeltimer0(pp) + doaddtimer(pp, t) if s == timerModifiedEarlier { atomic.Xadd(&pp.adjustTimers, -1) } @@ -838,18 +790,14 @@ func runOneTimer(pp *p, t *timer, now int64) { // Leave in heap but adjust next time to fire. delta := t.when - now t.when += t.period * (1 + -delta/t.period) - if !siftdownTimer(pp.timers, 0) { - badTimer() - } + siftdownTimer(pp.timers, 0) if !atomic.Cas(&t.status, timerRunning, timerWaiting) { badTimer() } updateTimer0When(pp) } else { // Remove from heap. - if !dodeltimer0(pp) { - badTimer() - } + dodeltimer0(pp) if !atomic.Cas(&t.status, timerRunning, timerNoStatus) { badTimer() } @@ -1053,9 +1001,9 @@ func timeSleepUntil() (int64, *p) { // "panic holding locks" message. Instead, we panic while not // holding a lock. 
-func siftupTimer(t []*timer, i int) bool { +func siftupTimer(t []*timer, i int) { if i >= len(t) { - return false + badTimer() } when := t[i].when tmp := t[i] @@ -1070,13 +1018,12 @@ func siftupTimer(t []*timer, i int) bool { if tmp != t[i] { t[i] = tmp } - return true } -func siftdownTimer(t []*timer, i int) bool { +func siftdownTimer(t []*timer, i int) { n := len(t) if i >= n { - return false + badTimer() } when := t[i].when tmp := t[i] @@ -1111,7 +1058,6 @@ func siftdownTimer(t []*timer, i int) bool { if tmp != t[i] { t[i] = tmp } - return true } // badTimer is called if the timer data structures have been corrupted, @@ -1119,5 +1065,5 @@ func siftdownTimer(t []*timer, i int) bool { // panicing due to invalid slice access while holding locks. // See issue #25686. func badTimer() { - panic(errorString("racy use of timers")) + throw("timer data corruption") } |
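Finally, the runtime/time.go rework above changes the timer state machine so that deltimer and modtimer route every transition through the transient timerModifying status, wait with osyield when they observe another writer in that state (instead of calling badTimer), and hold the M via acquirem for the duration of the window to avoid the self-deadlock described in issue 38070; resettimer is reduced to a call to modtimer. The runtime internals (acquirem, osyield, the per-P timer heap) cannot be reproduced in user code, but the following hedged sketch, with made-up names, shows the same claim-modify-publish discipline built on sync/atomic:

```go
// Sketch only: a user-level analogue of the timerModifying discipline.
// A writer claims the record by CASing its status to a transient
// "modifying" value, mutates it, then publishes the final status; a
// concurrent writer that observes the transient state yields and retries
// rather than treating it as corruption.
package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

const (
	statusWaiting uint32 = iota
	statusModifying
	statusDeleted
)

type fakeTimer struct {
	status uint32
	when   int64
}

// modify mirrors the shape of modtimer: claim, update, publish.
func (t *fakeTimer) modify(when int64) {
	for {
		switch s := atomic.LoadUint32(&t.status); s {
		case statusWaiting, statusDeleted:
			if atomic.CompareAndSwapUint32(&t.status, s, statusModifying) {
				t.when = when // safe: we own the record while it is "modifying"
				if !atomic.CompareAndSwapUint32(&t.status, statusModifying, statusWaiting) {
					panic("status changed while owned")
				}
				return
			}
		case statusModifying:
			// Someone else is mid-update: wait for them, do not panic.
			runtime.Gosched()
		}
	}
}

func main() {
	t := &fakeTimer{status: statusWaiting}
	var wg sync.WaitGroup
	for i := 1; i <= 4; i++ {
		wg.Add(1)
		go func(n int64) {
			defer wg.Done()
			t.modify(n)
		}(int64(i))
	}
	wg.Wait()
	fmt.Println("final when:", t.when, "status:", atomic.LoadUint32(&t.status))
}
```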