| author | Ian Lance Taylor <iant@golang.org> | 2022-02-11 15:02:44 -0800 |
|---|---|---|
| committer | Ian Lance Taylor <iant@golang.org> | 2022-02-11 15:02:44 -0800 |
| commit | 9a510fb0970d3d9a4201bce8965cabe67850386b (patch) | |
| tree | 43d7fd2bbfd7ad8c9625a718a5e8718889351994 /libgo/go/runtime/pprof | |
| parent | a6d3012b274f38b20e2a57162106f625746af6c6 (diff) | |
| parent | 8dc2499aa62f768c6395c9754b8cabc1ce25c494 (diff) | |
| download | gcc-9a510fb0970d3d9a4201bce8965cabe67850386b.zip, gcc-9a510fb0970d3d9a4201bce8965cabe67850386b.tar.gz, gcc-9a510fb0970d3d9a4201bce8965cabe67850386b.tar.bz2 | |
Merge from trunk revision 8dc2499aa62f768c6395c9754b8cabc1ce25c494
Diffstat (limited to 'libgo/go/runtime/pprof')
| -rw-r--r-- | libgo/go/runtime/pprof/mprof_test.go | 32 |
| -rw-r--r-- | libgo/go/runtime/pprof/pprof.go | 11 |
| -rw-r--r-- | libgo/go/runtime/pprof/pprof_norusage.go | 1 |
| -rw-r--r-- | libgo/go/runtime/pprof/pprof_rusage.go | 1 |
| -rw-r--r-- | libgo/go/runtime/pprof/pprof_test.go | 478 |
| -rw-r--r-- | libgo/go/runtime/pprof/proto.go | 76 |
| -rw-r--r-- | libgo/go/runtime/pprof/proto_test.go | 30 |
| -rw-r--r-- | libgo/go/runtime/pprof/rusage_test.go | 39 |
| -rw-r--r-- | libgo/go/runtime/pprof/uname_linux_test.go | 61 |
| -rw-r--r-- | libgo/go/runtime/pprof/uname_other_test.go | 15 |
10 files changed, 574 insertions, 170 deletions
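Many hunks in the patch below are mechanical Go 1.18 cleanups: `interface{}` becomes the new `any` alias, and `strings.Index`/`strings.SplitN` slicing becomes `strings.Cut`. The standalone sketch below (not part of the patch; file layout and values are illustrative) shows why these rewrites are behavior-preserving:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	spec := "pprof.cpuHogger;key=value"

	// Old pattern: find the separator index, then slice around it by hand.
	if semi := strings.Index(spec, ";"); semi >= 0 {
		fmt.Println(spec[:semi], spec[semi+1:]) // pprof.cpuHogger key=value
	}

	// New pattern (Go 1.18): Cut returns both halves plus a found flag.
	base, kv, ok := strings.Cut(spec, ";")
	fmt.Println(base, kv, ok) // pprof.cpuHogger key=value true

	// any is an alias for interface{} (also Go 1.18), so map[interface{}]T
	// and map[any]T are the same type; the pprof.go hunks rely on that.
	var m map[any][]uintptr = map[interface{}][]uintptr{}
	_ = m
}
```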
diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go
index 6a448a7..3abf5df 100644
--- a/libgo/go/runtime/pprof/mprof_test.go
+++ b/libgo/go/runtime/pprof/mprof_test.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !js
-// +build !js
 
 package pprof
 
@@ -18,7 +17,7 @@ import (
 	"unsafe"
 )
 
-var memSink interface{}
+var memSink any
 
 func allocateTransient1M() {
 	for i := 0; i < 1024; i++ {
@@ -86,17 +85,6 @@ func TestMemoryProfiler(t *testing.T) {
 
 	runtime.GC() // materialize stats
 
-	// TODO(mknyszek): Fix #45315 and remove this extra call.
-	//
-	// Unfortunately, it's possible for the sweep termination condition
-	// to flap, so with just one runtime.GC call, a freed object could be
-	// missed, leading this test to fail. A second call reduces the chance
-	// of this happening to zero, because sweeping actually has to finish
-	// to move on to the next GC, during which nothing will happen.
-	//
-	// See #46500 for more details.
-	runtime.GC()
-
 	memoryProfilerRun++
 
 	tests := []struct {
@@ -105,33 +93,33 @@ func TestMemoryProfiler(t *testing.T) {
 	}{{
 		stk: []string{"runtime/pprof.allocatePersistent1K", "runtime/pprof.TestMemoryProfiler"},
 		legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f x]+
-#	0x[0-9,a-f]+	runtime/pprof\.allocatePersistent1K\+0x[0-9,a-f]+	.*/mprof_test\.go:48
-#	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test\.go:83
+#	0x[0-9,a-f]+	runtime/pprof\.allocatePersistent1K\+0x[0-9,a-f]+	.*/mprof_test\.go:47
+#	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test\.go:82
 `, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun),
 	}, {
 		stk: []string{"runtime/pprof.allocateTransient1M", "runtime/pprof.TestMemoryProfiler"},
 		legacy: fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+
-#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient1M\+0x[0-9,a-f]+	.*/mprof_test.go:25
-#	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:80
+#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient1M\+0x[0-9,a-f]+	.*/mprof_test.go:24
+#	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:79
 `, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun),
 	}, {
 		stk: []string{"runtime/pprof.allocateTransient2M", "runtime/pprof.TestMemoryProfiler"},
 		// This should start with "0: 0" but gccgo's imprecise
 		// GC means that sometimes the value is not collected.
 		legacy: fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+
-#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient2M\+0x[0-9,a-f]+	.*/mprof_test.go:31
-#	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:81
+#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient2M\+0x[0-9,a-f]+	.*/mprof_test.go:30
+#	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:80
 `, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun),
 	}, {
 		stk: []string{"runtime/pprof.allocateTransient2MInline", "runtime/pprof.TestMemoryProfiler"},
 		legacy: fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+
-#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient2MInline\+0x[0-9,a-f]+	.*/mprof_test.go:35
-#	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:82
+#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient2MInline\+0x[0-9,a-f]+	.*/mprof_test.go:34
+#	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:81
 `, memoryProfilerRun, (4<<20)*memoryProfilerRun, memoryProfilerRun, (4<<20)*memoryProfilerRun),
 	}, {
 		stk: []string{"runtime/pprof.allocateReflectTransient"},
 		legacy: fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @( 0x[0-9,a-f]+)+
-#	0x[0-9,a-f]+	runtime/pprof\.allocateReflectTransient\+0x[0-9,a-f]+	.*/mprof_test.go:56
+#	0x[0-9,a-f]+	runtime/pprof\.allocateReflectTransient\+0x[0-9,a-f]+	.*/mprof_test.go:55
 `, memoryProfilerRun, (3<<20)*memoryProfilerRun, memoryProfilerRun, (3<<20)*memoryProfilerRun),
 	}}
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index 54838fc..e90cf97 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -76,6 +76,7 @@ import (
 	"bufio"
 	"bytes"
 	"fmt"
+	"internal/abi"
 	"io"
 	"runtime"
 	"sort"
@@ -133,7 +134,7 @@ import (
 type Profile struct {
 	name  string
 	mu    sync.Mutex
-	m     map[interface{}][]uintptr
+	m     map[any][]uintptr
 	count func() int
 	write func(io.Writer, int) error
 }
@@ -216,7 +217,7 @@ func NewProfile(name string) *Profile {
 	}
 	p := &Profile{
 		name: name,
-		m:    map[interface{}][]uintptr{},
+		m:    map[any][]uintptr{},
 	}
 	profiles.m[name] = p
 	return p
@@ -276,7 +277,7 @@ func (p *Profile) Count() int {
 // Passing skip=0 begins the stack trace at the call to Add inside rpc.NewClient.
 // Passing skip=1 begins the stack trace at the call to NewClient inside mypkg.Run.
 //
-func (p *Profile) Add(value interface{}, skip int) {
+func (p *Profile) Add(value any, skip int) {
 	if p.name == "" {
 		panic("pprof: use of uninitialized Profile")
 	}
@@ -289,7 +290,7 @@ func (p *Profile) Add(value interface{}, skip int) {
 	stk = stk[:n]
 	if len(stk) == 0 {
 		// The value for skip is too large, and there's no stack trace to record.
-		stk = []uintptr{funcPC(lostProfileEvent) + 1}
+		stk = []uintptr{abi.FuncPCABIInternal(lostProfileEvent) + 1}
 	}
 
 	p.mu.Lock()
@@ -302,7 +303,7 @@ func (p *Profile) Add(value interface{}, skip int) {
 
 // Remove removes the execution stack associated with value from the profile.
 // It is a no-op if the value is not in the profile.
-func (p *Profile) Remove(value interface{}) {
+func (p *Profile) Remove(value any) {
 	p.mu.Lock()
 	defer p.mu.Unlock()
 	delete(p.m, value)
diff --git a/libgo/go/runtime/pprof/pprof_norusage.go b/libgo/go/runtime/pprof/pprof_norusage.go
index e175dd3..cbc5176 100644
--- a/libgo/go/runtime/pprof/pprof_norusage.go
+++ b/libgo/go/runtime/pprof/pprof_norusage.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !darwin && !linux
-// +build !darwin,!linux
 
 package pprof
 
diff --git a/libgo/go/runtime/pprof/pprof_rusage.go b/libgo/go/runtime/pprof/pprof_rusage.go
index 269f21b..46263fe 100644
--- a/libgo/go/runtime/pprof/pprof_rusage.go
+++ b/libgo/go/runtime/pprof/pprof_rusage.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build darwin || linux
-// +build darwin linux
 
 package pprof
 
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index ab96b0c..def49c1 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !js
-// +build !js
 
 package pprof
 
@@ -11,6 +10,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"internal/abi"
 	"internal/profile"
 	"internal/testenv"
 	"io"
@@ -20,6 +20,7 @@ import (
 	"os/exec"
 	"regexp"
 	"runtime"
+	"runtime/debug"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -88,14 +89,16 @@ func avoidFunctions() []string {
 }
 
 func TestCPUProfile(t *testing.T) {
-	testCPUProfile(t, stackContains, []string{"pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) {
+	matches := matchAndAvoidStacks(stackContains, []string{"pprof.cpuHog1"}, avoidFunctions())
+	testCPUProfile(t, matches, func(dur time.Duration) {
 		cpuHogger(cpuHog1, &salt1, dur)
 	})
 }
 
 func TestCPUProfileMultithreaded(t *testing.T) {
 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
-	testCPUProfile(t, stackContains, []string{"pprof.cpuHog1", "pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) {
+	matches := matchAndAvoidStacks(stackContains, []string{"pprof.cpuHog1", "pprof.cpuHog2"}, avoidFunctions())
+	testCPUProfile(t, matches, func(dur time.Duration) {
 		c := make(chan int)
 		go func() {
 			cpuHogger(cpuHog1, &salt1, dur)
@@ -106,17 +109,139 @@ func TestCPUProfileMultithreaded(t *testing.T) {
 	})
 }
 
+func TestCPUProfileMultithreadMagnitude(t *testing.T) {
+	if runtime.GOOS != "linux" {
+		t.Skip("issue 35057 is only confirmed on Linux")
+	}
+
+	// Linux [5.9,5.16) has a kernel bug that can break CPU timers on newly
+	// created threads, breaking our CPU accounting.
+	major, minor, patch, err := linuxKernelVersion()
+	if err != nil {
+		t.Errorf("Error determining kernel version: %v", err)
+	}
+	t.Logf("Running on Linux %d.%d.%d", major, minor, patch)
+	defer func() {
+		if t.Failed() {
+			t.Logf("Failure of this test may indicate that your system suffers from a known Linux kernel bug fixed on newer kernels. See https://golang.org/issue/49065.")
+		}
+	}()
+
+	// Disable on affected builders to avoid flakiness, but otherwise keep
+	// it enabled to potentially warn users that they are on a broken
+	// kernel.
+	if testenv.Builder() != "" && (runtime.GOARCH == "386" || runtime.GOARCH == "amd64") {
+		have59 := major > 5 || (major == 5 && minor >= 9)
+		have516 := major > 5 || (major == 5 && minor >= 16)
+		if have59 && !have516 {
+			testenv.SkipFlaky(t, 49065)
+		}
+	}
+
+	// Run a workload in a single goroutine, then run copies of the same
+	// workload in several goroutines. For both the serial and parallel cases,
+	// the CPU time the process measures with its own profiler should match the
+	// total CPU usage that the OS reports.
+	//
+	// We could also check that increases in parallelism (GOMAXPROCS) lead to a
+	// linear increase in the CPU usage reported by both the OS and the
+	// profiler, but without a guarantee of exclusive access to CPU resources
+	// that is likely to be a flaky test.
+
+	// Require the smaller value to be within 10%, or 40% in short mode.
+	maxDiff := 0.10
+	if testing.Short() {
+		maxDiff = 0.40
+	}
+
+	compare := func(a, b time.Duration, maxDiff float64) error {
+		if a <= 0 || b <= 0 {
+			return fmt.Errorf("Expected both time reports to be positive")
+		}
+
+		if a < b {
+			a, b = b, a
+		}
+
+		diff := float64(a-b) / float64(a)
+		if diff > maxDiff {
+			return fmt.Errorf("CPU usage reports are too different (limit -%.1f%%, got -%.1f%%)", maxDiff*100, diff*100)
+		}
+
+		return nil
+	}
+
+	for _, tc := range []struct {
+		name    string
+		workers int
+	}{
+		{
+			name:    "serial",
+			workers: 1,
+		},
+		{
+			name:    "parallel",
+			workers: runtime.GOMAXPROCS(0),
+		},
+	} {
+		// check that the OS's perspective matches what the Go runtime measures.
+		t.Run(tc.name, func(t *testing.T) {
+			t.Logf("Running with %d workers", tc.workers)
+
+			var cpuTime time.Duration
+			matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions())
+			p := testCPUProfile(t, matches, func(dur time.Duration) {
+				cpuTime = diffCPUTime(t, func() {
+					var wg sync.WaitGroup
+					var once sync.Once
+					for i := 0; i < tc.workers; i++ {
+						wg.Add(1)
+						go func() {
+							defer wg.Done()
+							var salt = 0
+							cpuHogger(cpuHog1, &salt, dur)
+							once.Do(func() { salt1 = salt })
+						}()
+					}
+					wg.Wait()
+				})
+			})
+
+			for i, unit := range []string{"count", "nanoseconds"} {
+				if have, want := p.SampleType[i].Unit, unit; have != want {
+					t.Errorf("pN SampleType[%d]; %q != %q", i, have, want)
+				}
+			}
+
+			// cpuHog1 called above is the primary source of CPU
+			// load, but there may be some background work by the
+			// runtime. Since the OS rusage measurement will
+			// include all work done by the process, also compare
+			// against all samples in our profile.
+			var value time.Duration
+			for _, sample := range p.Sample {
+				value += time.Duration(sample.Value[1]) * time.Nanosecond
+			}
+
+			t.Logf("compare %s vs %s", cpuTime, value)
+			if err := compare(cpuTime, value, maxDiff); err != nil {
+				t.Errorf("compare got %v want nil", err)
+			}
+		})
+	}
+}
+
 // containsInlinedCall reports whether the function body for the function f is
 // known to contain an inlined function call within the first maxBytes bytes.
-func containsInlinedCall(f interface{}, maxBytes int) bool {
+func containsInlinedCall(f any, maxBytes int) bool {
 	_, found := findInlinedCall(f, maxBytes)
 	return found
 }
 
 // findInlinedCall returns the PC of an inlined function call within
 // the function body for the function f if any.
-func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
-	fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
+func findInlinedCall(f any, maxBytes int) (pc uint64, found bool) {
+	fFunc := runtime.FuncForPC(uintptr(abi.FuncPCABIInternal(f)))
 	if fFunc == nil || fFunc.Entry() == 0 {
 		panic("failed to locate function entry")
 	}
@@ -148,7 +273,8 @@ func TestCPUProfileInlining(t *testing.T) {
 		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
 	}
 
-	p := testCPUProfile(t, stackContains, []string{"pprof.inlinedCallee", "pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) {
+	matches := matchAndAvoidStacks(stackContains, []string{"pprof.inlinedCallee", "pprof.inlinedCaller"}, avoidFunctions())
+	p := testCPUProfile(t, matches, func(dur time.Duration) {
 		cpuHogger(inlinedCaller, &salt1, dur)
 	})
 
@@ -198,7 +324,8 @@ func inlinedCalleeDump(pcs []uintptr) {
 }
 
 func TestCPUProfileRecursion(t *testing.T) {
-	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions(), func(dur time.Duration) {
+	matches := matchAndAvoidStacks(stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions())
+	p := testCPUProfile(t, matches, func(dur time.Duration) {
 		cpuHogger(recursionCaller, &salt1, dur)
 	})
 
@@ -283,7 +410,7 @@ func cpuProfilingBroken() bool {
 
 // testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
 // as interpreted by matches, and returns the parsed profile.
-func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []string, f func(dur time.Duration)) *profile.Profile {
+func testCPUProfile(t *testing.T, matches profileMatchFunc, f func(dur time.Duration)) *profile.Profile {
 	switch runtime.GOOS {
 	case "darwin":
 		out, err := exec.Command("uname", "-a").CombinedOutput()
@@ -324,7 +451,7 @@ func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []stri
 		f(duration)
 		StopCPUProfile()
 
-		if p, ok := profileOk(t, matches, need, avoid, prof, duration); ok {
+		if p, ok := profileOk(t, matches, prof, duration); ok {
 			return p
 		}
 
@@ -349,6 +476,16 @@ func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []stri
 	return nil
 }
 
+var diffCPUTimeImpl func(f func()) time.Duration
+
+func diffCPUTime(t *testing.T, f func()) time.Duration {
+	if fn := diffCPUTimeImpl; fn != nil {
+		return fn(f)
+	}
+	t.Fatalf("cannot measure CPU time on GOOS=%s GOARCH=%s", runtime.GOOS, runtime.GOARCH)
+	return 0
+}
+
 func contains(slice []string, s string) bool {
 	for i := range slice {
 		if slice[i] == s {
@@ -370,44 +507,18 @@ func stackContains(spec string, count uintptr, stk []*profile.Location, labels m
 	return false
 }
 
-type matchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
+type sampleMatchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
 
-func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
+func profileOk(t *testing.T, matches profileMatchFunc, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
 	ok = true
 
-	// Check that profile is well formed, contains 'need', and does not contain
-	// anything from 'avoid'.
-	have := make([]uintptr, len(need))
-	avoidSamples := make([]uintptr, len(avoid))
 	var samples uintptr
 	var buf bytes.Buffer
 	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
 		fmt.Fprintf(&buf, "%d:", count)
 		fprintStack(&buf, stk)
+		fmt.Fprintf(&buf, " labels: %v\n", labels)
 		samples += count
-		for i, spec := range need {
-			if matches(spec, count, stk, labels) {
-				have[i] += count
-			}
-		}
-		for i, name := range avoid {
-			for _, loc := range stk {
-				for _, line := range loc.Line {
-					if strings.Contains(line.Function.Name, name) {
-						avoidSamples[i] += count
-					}
-				}
-			}
-		}
 		fmt.Fprintf(&buf, "\n")
 	})
 	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
@@ -430,39 +541,77 @@ func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, p
 		ok = false
 	}
 
-	for i, name := range avoid {
-		bad := avoidSamples[i]
-		if bad != 0 {
-			t.Logf("found %d samples in avoid-function %s\n", bad, name)
-			ok = false
-		}
+	if matches != nil && !matches(t, p) {
+		ok = false
 	}
 
-	if len(need) == 0 {
-		return p, ok
-	}
+	return p, ok
+}
 
-	var total uintptr
-	for i, name := range need {
-		total += have[i]
-		t.Logf("%s: %d\n", name, have[i])
-	}
-	if total == 0 {
-		t.Logf("no samples in expected functions")
-		ok = false
-	}
-	// We'd like to check a reasonable minimum, like
-	// total / len(have) / smallconstant, but this test is
-	// pretty flaky (see bug 7095). So we'll just test to
-	// make sure we got at least one sample.
-	min := uintptr(1)
-	for i, name := range need {
-		if have[i] < min {
-			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
+type profileMatchFunc func(*testing.T, *profile.Profile) bool
+
+func matchAndAvoidStacks(matches sampleMatchFunc, need []string, avoid []string) profileMatchFunc {
+	return func(t *testing.T, p *profile.Profile) (ok bool) {
+		ok = true
+
+		// Check that profile is well formed, contains 'need', and does not contain
+		// anything from 'avoid'.
+		have := make([]uintptr, len(need))
+		avoidSamples := make([]uintptr, len(avoid))
+
+		for _, sample := range p.Sample {
+			count := uintptr(sample.Value[0])
+			for i, spec := range need {
+				if matches(spec, count, sample.Location, sample.Label) {
+					have[i] += count
+				}
+			}
+			for i, name := range avoid {
+				for _, loc := range sample.Location {
+					for _, line := range loc.Line {
+						if strings.Contains(line.Function.Name, name) {
+							avoidSamples[i] += count
+						}
+					}
+				}
+			}
+		}
+
+		for i, name := range avoid {
+			bad := avoidSamples[i]
+			if bad != 0 {
+				t.Logf("found %d samples in avoid-function %s\n", bad, name)
+				ok = false
+			}
+		}
+
+		if len(need) == 0 {
+			return
+		}
+
+		var total uintptr
+		for i, name := range need {
+			total += have[i]
+			t.Logf("%s: %d\n", name, have[i])
+		}
+		if total == 0 {
+			t.Logf("no samples in expected functions")
 			ok = false
 		}
+
+		// We'd like to check a reasonable minimum, like
+		// total / len(have) / smallconstant, but this test is
+		// pretty flaky (see bug 7095). So we'll just test to
+		// make sure we got at least one sample.
+		min := uintptr(1)
+		for i, name := range need {
+			if have[i] < min {
+				t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
+				ok = false
+			}
+		}
+		return
 	}
-	return p, ok
 }
 
 // Fork can hang if preempted with signals frequently enough (see issue 5517).
@@ -574,12 +723,11 @@ func fprintStack(w io.Writer, stk []*profile.Location) {
 		}
 		fmt.Fprintf(w, ")")
 	}
-	fmt.Fprintf(w, "\n")
 }
 
 // Test that profiling of division operations is okay, especially on ARM. See issue 6681.
 func TestMathBigDivide(t *testing.T) {
-	testCPUProfile(t, nil, nil, nil, func(duration time.Duration) {
+	testCPUProfile(t, nil, func(duration time.Duration) {
 		t := time.After(duration)
 		pi := new(big.Int)
 		for {
@@ -611,7 +759,8 @@ func TestMorestack(t *testing.T) {
 	if runtime.Compiler == "gccgo" {
 		t.Skip("no runtime.newstack in gccgo")
 	}
-	testCPUProfile(t, stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions(), func(duration time.Duration) {
+	matches := matchAndAvoidStacks(stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions())
+	testCPUProfile(t, matches, func(duration time.Duration) {
 		t := time.After(duration)
 		c := make(chan bool)
 		for {
@@ -1152,11 +1301,10 @@ func TestGoroutineCounts(t *testing.T) {
 
 func containsInOrder(s string, all ...string) bool {
 	for _, t := range all {
-		i := strings.Index(s, t)
-		if i < 0 {
+		var ok bool
+		if _, s, ok = strings.Cut(s, t); !ok {
 			return false
 		}
-		s = s[i+len(t):]
 	}
 	return true
 }
@@ -1236,22 +1384,23 @@ func TestEmptyCallStack(t *testing.T) {
 // stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
 // and value and has funcname somewhere in the stack.
 func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
-	semi := strings.Index(spec, ";")
-	if semi == -1 {
+	base, kv, ok := strings.Cut(spec, ";")
+	if !ok {
 		panic("no semicolon in key/value spec")
 	}
-	kv := strings.SplitN(spec[semi+1:], "=", 2)
-	if len(kv) != 2 {
+	k, v, ok := strings.Cut(kv, "=")
+	if !ok {
 		panic("missing = in key/value spec")
 	}
-	if !contains(labels[kv[0]], kv[1]) {
+	if !contains(labels[k], v) {
 		return false
 	}
-	return stackContains(spec[:semi], count, stk, labels)
+	return stackContains(base, count, stk, labels)
 }
 
 func TestCPUProfileLabel(t *testing.T) {
-	testCPUProfile(t, stackContainsLabeled, []string{"pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
+	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"pprof.cpuHogger;key=value"}, avoidFunctions())
+	testCPUProfile(t, matches, func(dur time.Duration) {
 		Do(context.Background(), Labels("key", "value"), func(context.Context) {
 			cpuHogger(cpuHog1, &salt1, dur)
 		})
@@ -1262,7 +1411,8 @@ func TestLabelRace(t *testing.T) {
 	// Test the race detector annotations for synchronization
 	// between settings labels and consuming them from the
 	// profile.
-	testCPUProfile(t, stackContainsLabeled, []string{"pprof.cpuHogger;key=value"}, nil, func(dur time.Duration) {
+	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"pprof.cpuHogger;key=value"}, nil)
+	testCPUProfile(t, matches, func(dur time.Duration) {
 		start := time.Now()
 		var wg sync.WaitGroup
 		for time.Since(start) < dur {
@@ -1281,6 +1431,126 @@ func TestLabelRace(t *testing.T) {
 	})
 }
 
+// TestLabelSystemstack makes sure CPU profiler samples of goroutines running
+// on systemstack include the correct pprof labels. See issue #48577
+func TestLabelSystemstack(t *testing.T) {
+	// Grab and re-set the initial value before continuing to ensure
+	// GOGC doesn't actually change following the test.
+	gogc := debug.SetGCPercent(100)
+	debug.SetGCPercent(gogc)
+
+	matches := matchAndAvoidStacks(stackContainsLabeled, []string{"runtime.systemstack;key=value"}, avoidFunctions())
+	p := testCPUProfile(t, matches, func(dur time.Duration) {
+		Do(context.Background(), Labels("key", "value"), func(ctx context.Context) {
+			parallelLabelHog(ctx, dur, gogc)
+		})
+	})
+
+	// Two conditions to check:
+	// * labelHog should always be labeled.
+	// * The label should _only_ appear on labelHog and the Do call above.
+	for _, s := range p.Sample {
+		isLabeled := s.Label != nil && contains(s.Label["key"], "value")
+		var (
+			mayBeLabeled     bool
+			mustBeLabeled    bool
+			mustNotBeLabeled bool
+		)
+		for _, loc := range s.Location {
+			for _, l := range loc.Line {
+				switch l.Function.Name {
+				case "runtime/pprof.labelHog", "runtime/pprof.parallelLabelHog", "runtime/pprof.parallelLabelHog.func1":
+					mustBeLabeled = true
+				case "runtime/pprof.Do":
+					// Do sets the labels, so samples may
+					// or may not be labeled depending on
+					// which part of the function they are
+					// at.
+					mayBeLabeled = true
+				case "runtime.bgsweep", "runtime.bgscavenge", "runtime.forcegchelper", "runtime.gcBgMarkWorker", "runtime.runfinq", "runtime.sysmon":
+					// Runtime system goroutines or threads
+					// (such as those identified by
+					// runtime.isSystemGoroutine). These
+					// should never be labeled.
+					mustNotBeLabeled = true
+				case "gogo", "gosave_systemstack_switch", "racecall":
+					// These are context switch/race
+					// critical that we can't do a full
+					// traceback from. Typically this would
+					// be covered by the runtime check
+					// below, but these symbols don't have
+					// the package name.
+					mayBeLabeled = true
+				}
+
+				if l.Function.Name == "" || strings.HasPrefix(l.Function.Name, "runtime.") || strings.HasPrefix(l.Function.Name, "runtime_") {
+					// There are many places in the runtime
+					// where we can't do a full traceback.
+					// Ideally we'd list them all, but
+					// barring that allow anything in the
+					// runtime, unless explicitly excluded
+					// above.
+					mayBeLabeled = true
+				}
+			}
+		}
+		if mustNotBeLabeled {
+			// If this must not be labeled, then mayBeLabeled hints
+			// are not relevant.
+			mayBeLabeled = false
+		}
+		if mustBeLabeled && !isLabeled {
+			var buf bytes.Buffer
+			fprintStack(&buf, s.Location)
+			t.Errorf("Sample labeled got false want true: %s", buf.String())
+		}
+		if mustNotBeLabeled && isLabeled {
+			var buf bytes.Buffer
+			fprintStack(&buf, s.Location)
+			t.Errorf("Sample labeled got true want false: %s", buf.String())
+		}
+		if isLabeled && !(mayBeLabeled || mustBeLabeled) {
+			var buf bytes.Buffer
+			fprintStack(&buf, s.Location)
+			t.Errorf("Sample labeled got true want false: %s", buf.String())
+		}
+	}
+}
+
+// labelHog is designed to burn CPU time in a way that a high number of CPU
+// samples end up running on systemstack.
+func labelHog(stop chan struct{}, gogc int) {
+	// Regression test for issue 50032. We must give GC an opportunity to
+	// be initially triggered by a labelled goroutine.
+	runtime.GC()
+
+	for i := 0; ; i++ {
+		select {
+		case <-stop:
+			return
+		default:
+			debug.SetGCPercent(gogc)
+		}
+	}
+}
+
+// parallelLabelHog runs GOMAXPROCS goroutines running labelHog.
+func parallelLabelHog(ctx context.Context, dur time.Duration, gogc int) {
+	var wg sync.WaitGroup
+	stop := make(chan struct{})
+	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			labelHog(stop, gogc)
+		}()
+	}
+
+	time.Sleep(dur)
+	close(stop)
+	wg.Wait()
+}
+
 // Check that there is no deadlock when the program receives SIGPROF while in
 // 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
 func TestAtomicLoadStore64(t *testing.T) {
@@ -1387,6 +1657,7 @@ func TestTryAdd(t *testing.T) {
 	testCases := []struct {
 		name        string
 		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
+		count       int               // number of records in input.
 		wantLocs    [][]string        // ordered location entries with function names.
 		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
 	}{{
@@ -1396,6 +1667,7 @@ func TestTryAdd(t *testing.T) {
 			3, 0, 500, // hz = 500. Must match the period.
 			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
 		},
+		count: 2,
 		wantLocs: [][]string{
 			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
 		},
@@ -1412,6 +1684,7 @@ func TestTryAdd(t *testing.T) {
 			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
 			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
 		},
+		count: 3,
 		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
 		wantSamples: []*profile.Sample{
 			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
@@ -1425,6 +1698,7 @@ func TestTryAdd(t *testing.T) {
 			// entry. The "stk" entry is actually the count.
 			4, 0, 0, 4242,
 		},
+		count: 2,
 		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
 		wantSamples: []*profile.Sample{
 			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
@@ -1443,6 +1717,7 @@ func TestTryAdd(t *testing.T) {
 			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
 			4, 0, 40, inlinedCallerStack[0],
 		},
+		count: 3,
 		// inlinedCallerDump shows up here because
 		// runtime_expandFinalInlineFrame adds it to the stack frame.
 		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
@@ -1456,6 +1731,7 @@ func TestTryAdd(t *testing.T) {
 			3, 0, 500, // hz = 500. Must match the period.
 			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
 		},
+		count: 2,
 		wantLocs: [][]string{
 			{"runtime/pprof.recursionChainBottom"},
 			{
@@ -1479,6 +1755,7 @@ func TestTryAdd(t *testing.T) {
 			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
 			4, 0, 60, inlinedCallerStack[0],
 		},
+		count: 3,
 		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
 		wantSamples: []*profile.Sample{
 			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
@@ -1491,6 +1768,7 @@ func TestTryAdd(t *testing.T) {
 			4, 0, 70, inlinedCallerStack[0],
 			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
 		},
+		count: 3,
 		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
 		wantSamples: []*profile.Sample{
 			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
@@ -1503,6 +1781,7 @@ func TestTryAdd(t *testing.T) {
 			3, 0, 500, // hz = 500. Must match the period.
 			4, 0, 70, inlinedCallerStack[0],
 		},
+		count: 2,
 		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
 		wantSamples: []*profile.Sample{
 			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
@@ -1518,6 +1797,7 @@ func TestTryAdd(t *testing.T) {
 			// from getting merged into above.
 			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
 		},
+		count: 3,
 		wantLocs: [][]string{
 			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
 			{"runtime/pprof.inlinedCallerDump"},
@@ -1530,7 +1810,7 @@ func TestTryAdd(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			p, err := translateCPUProfile(tc.input)
+			p, err := translateCPUProfile(tc.input, tc.count)
 			if err != nil {
 				t.Fatalf("translating profile: %v", err)
 			}
@@ -1563,3 +1843,39 @@ func TestTryAdd(t *testing.T) {
 		})
 	}
 }
+
+func TestTimeVDSO(t *testing.T) {
+	// Test that time functions have the right stack trace. In particular,
+	// it shouldn't be recursive.
+
+	if runtime.GOOS == "android" {
+		// Flaky on Android, issue 48655. VDSO may not be enabled.
+		testenv.SkipFlaky(t, 48655)
+	}
+
+	matches := matchAndAvoidStacks(stackContains, []string{"time.now"}, avoidFunctions())
+	p := testCPUProfile(t, matches, func(dur time.Duration) {
+		t0 := time.Now()
+		for {
+			t := time.Now()
+			if t.Sub(t0) >= dur {
+				return
+			}
+		}
+	})
+
+	// Check for recursive time.now sample.
+	for _, sample := range p.Sample {
+		var seenNow bool
+		for _, loc := range sample.Location {
+			for _, line := range loc.Line {
+				if line.Function.Name == "time.now" {
+					if seenNow {
+						t.Fatalf("unexpected recursive time.now")
+					}
+					seenNow = true
+				}
+			}
+		}
+	}
+}
diff --git a/libgo/go/runtime/pprof/proto.go b/libgo/go/runtime/pprof/proto.go
index 6c5dd31..39a6f58 100644
--- a/libgo/go/runtime/pprof/proto.go
+++ b/libgo/go/runtime/pprof/proto.go
@@ -8,11 +8,12 @@ import (
 	"bytes"
 	"compress/gzip"
 	"fmt"
-	internalcpu "internal/cpu"
+	"internal/abi"
 	"io"
 	"os"
 	"runtime"
 	"strconv"
+	"strings"
 	"time"
 	"unsafe"
 )
@@ -22,23 +23,6 @@ import (
 // (The name shows up in the pprof graphs.)
 func lostProfileEvent() { lostProfileEvent() }
 
-// funcPC returns the PC for the func value f.
-func funcPC(f interface{}) uintptr {
-	type iface struct {
-		tab  unsafe.Pointer
-		data unsafe.Pointer
-	}
-	i := (*iface)(unsafe.Pointer(&f))
-	r := *(*uintptr)(i.data)
-	if internalcpu.FunctionDescriptors {
-		// With PPC64 ELF ABI v1 function descriptors the
-		// function address is a pointer to a struct whose
-		// first field is the actual PC.
-		r = *(*uintptr)(unsafe.Pointer(r))
-	}
-	return r
-}
-
 // A profileBuilder writes a profile incrementally from a
 // stream of profile samples delivered by the runtime.
 type profileBuilder struct {
@@ -282,8 +266,9 @@ func newProfileBuilder(w io.Writer) *profileBuilder {
 }
 
 // addCPUData adds the CPU profiling data to the profile.
-// The data must be a whole number of records,
-// as delivered by the runtime.
+//
+// The data must be a whole number of records, as delivered by the runtime.
+// len(tags) must be equal to the number of records in data.
 func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error {
 	if !b.havePeriod {
 		// first record is period
@@ -298,6 +283,9 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
 		b.period = 1e9 / int64(data[2])
 		b.havePeriod = true
 		data = data[3:]
+		// Consume tag slot. Note that there isn't a meaningful tag
+		// value for this record.
+		tags = tags[1:]
 	}
 
 	// Parse CPU samples from the profile.
@@ -322,14 +310,14 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
 		if data[0] < 3 || tags != nil && len(tags) < 1 {
 			return fmt.Errorf("malformed profile")
 		}
+		if len(tags) < 1 {
+			return fmt.Errorf("mismatched profile records and tags")
+		}
 		count := data[2]
 		stk := data[3:data[0]]
 		data = data[data[0]:]
-		var tag unsafe.Pointer
-		if tags != nil {
-			tag = tags[0]
-			tags = tags[1:]
-		}
+		tag := tags[0]
+		tags = tags[1:]
 
 		if count == 0 && len(stk) == 1 {
 			// overflow record
@@ -338,11 +326,15 @@ func (b *profileBuilder) addCPUData(data []uint64, tags []unsafe.Pointer) error
 				// gentraceback guarantees that PCs in the
 				// stack can be unconditionally decremented and
 				// still be valid, so we must do the same.
-				uint64(funcPC(lostProfileEvent) + 1),
+				uint64(abi.FuncPCABIInternal(lostProfileEvent) + 1),
 			}
 		}
 		b.m.lookup(stk, tag).count += int64(count)
 	}
+
+	if len(tags) != 0 {
+		return fmt.Errorf("mismatched profile records and tags")
+	}
 	return nil
 }
 
@@ -598,6 +590,9 @@ func (b *profileBuilder) readMapping() {
 	}
 }
 
+var space = []byte(" ")
+var newline = []byte("\n")
+
 func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file, buildID string)) {
 	// $ cat /proc/self/maps
 	// 00400000-0040b000 r-xp 00000000 fc:01 787766                             /bin/cat
@@ -624,37 +619,24 @@ func parseProcSelfMaps(data []byte, addMapping func(lo, hi, offset uint64, file,
 	// next removes and returns the next field in the line.
 	// It also removes from line any spaces following the field.
 	next := func() []byte {
-		j := bytes.IndexByte(line, ' ')
-		if j < 0 {
-			f := line
-			line = nil
-			return f
-		}
-		f := line[:j]
-		line = line[j+1:]
-		for len(line) > 0 && line[0] == ' ' {
-			line = line[1:]
-		}
+		var f []byte
+		f, line, _ = bytes.Cut(line, space)
+		line = bytes.TrimLeft(line, " ")
 		return f
 	}
 
 	for len(data) > 0 {
-		i := bytes.IndexByte(data, '\n')
-		if i < 0 {
-			line, data = data, nil
-		} else {
-			line, data = data[:i], data[i+1:]
-		}
+		line, data, _ = bytes.Cut(data, newline)
 		addr := next()
-		i = bytes.IndexByte(addr, '-')
-		if i < 0 {
+		loStr, hiStr, ok := strings.Cut(string(addr), "-")
+		if !ok {
 			continue
 		}
-		lo, err := strconv.ParseUint(string(addr[:i]), 16, 64)
+		lo, err := strconv.ParseUint(loStr, 16, 64)
 		if err != nil {
 			continue
 		}
-		hi, err := strconv.ParseUint(string(addr[i+1:]), 16, 64)
+		hi, err := strconv.ParseUint(hiStr, 16, 64)
 		if err != nil {
 			continue
 		}
diff --git a/libgo/go/runtime/pprof/proto_test.go b/libgo/go/runtime/pprof/proto_test.go
index 9290210..339b85c 100644
--- a/libgo/go/runtime/pprof/proto_test.go
+++ b/libgo/go/runtime/pprof/proto_test.go
@@ -8,6 +8,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"internal/abi"
 	"internal/profile"
 	"internal/testenv"
 	"os"
@@ -16,16 +17,20 @@ import (
 	"runtime"
 	"strings"
 	"testing"
+	"unsafe"
 )
 
 // translateCPUProfile parses binary CPU profiling stack trace data
 // generated by runtime.CPUProfile() into a profile struct.
 // This is only used for testing. Real conversions stream the
 // data into the profileBuilder as it becomes available.
-func translateCPUProfile(data []uint64) (*profile.Profile, error) {
+//
+// count is the number of records in data.
+func translateCPUProfile(data []uint64, count int) (*profile.Profile, error) {
 	var buf bytes.Buffer
 	b := newProfileBuilder(&buf)
-	if err := b.addCPUData(data, nil); err != nil {
+	tags := make([]unsafe.Pointer, count)
+	if err := b.addCPUData(data, tags); err != nil {
 		return nil, err
 	}
 	b.build()
@@ -35,7 +40,7 @@ func translateCPUProfile(data []uint64) (*profile.Profile, error) {
 // fmtJSON returns a pretty-printed JSON form for x.
 // It works reasonbly well for printing protocol-buffer
 // data structures like profile.Profile.
-func fmtJSON(x interface{}) string {
+func fmtJSON(x any) string {
 	js, _ := json.MarshalIndent(x, "", "\t")
 	return string(js)
 }
@@ -45,7 +50,7 @@ func TestConvertCPUProfileEmpty(t *testing.T) {
 	var buf bytes.Buffer
 
 	b := []uint64{3, 0, 500} // empty profile at 500 Hz (2ms sample period)
-	p, err := translateCPUProfile(b)
+	p, err := translateCPUProfile(b, 1)
 	if err != nil {
 		t.Fatalf("translateCPUProfile: %v", err)
 	}
@@ -99,11 +104,11 @@ func testPCs(t *testing.T) (addr1, addr2 uint64, map1, map2 *profile.Mapping) {
 		map2 = mprof.Mapping[1]
 		map2.BuildID, _ = elfBuildID(map2.File)
 	case "js":
-		addr1 = uint64(funcPC(f1))
-		addr2 = uint64(funcPC(f2))
+		addr1 = uint64(abi.FuncPCABIInternal(f1))
+		addr2 = uint64(abi.FuncPCABIInternal(f2))
 	default:
-		addr1 = uint64(funcPC(f1))
-		addr2 = uint64(funcPC(f2))
+		addr1 = uint64(abi.FuncPCABIInternal(f1))
+		addr2 = uint64(abi.FuncPCABIInternal(f2))
 		// Fake mapping - HasFunctions will be true because two PCs from Go
 		// will be fully symbolized.
 		fake := &profile.Mapping{ID: 1, HasFunctions: true}
@@ -121,7 +126,7 @@ func TestConvertCPUProfile(t *testing.T) {
 		5, 0, 40, uint64(addr2 + 1), uint64(addr2 + 2), // 40 samples in addr2
 		5, 0, 10, uint64(addr1 + 1), uint64(addr1 + 2), // 10 samples in addr1
 	}
-	p, err := translateCPUProfile(b)
+	p, err := translateCPUProfile(b, 4)
 	if err != nil {
 		t.Fatalf("translating profile: %v", err)
 	}
@@ -275,11 +280,10 @@ func TestProcSelfMaps(t *testing.T) {
 
 	f := func(t *testing.T, input string) {
 		for tx, tt := range strings.Split(input, "\n\n") {
-			i := strings.Index(tt, "->\n")
-			if i < 0 {
+			in, out, ok := strings.Cut(tt, "->\n")
+			if !ok {
 				t.Fatal("malformed test case")
 			}
-			in, out := tt[:i], tt[i+len("->\n"):]
 			if len(out) > 0 && out[len(out)-1] != '\n' {
 				out += "\n"
 			}
@@ -431,7 +435,7 @@ func TestEmptyStack(t *testing.T) {
 		3, 0, 500, // hz = 500
 		3, 0, 10, // 10 samples with an empty stack trace
 	}
-	_, err := translateCPUProfile(b)
+	_, err := translateCPUProfile(b, 2)
 	if err != nil {
 		t.Fatalf("translating profile: %v", err)
 	}
diff --git a/libgo/go/runtime/pprof/rusage_test.go b/libgo/go/runtime/pprof/rusage_test.go
new file mode 100644
index 0000000..b0d651e
--- /dev/null
+++ b/libgo/go/runtime/pprof/rusage_test.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || freebsd || linux || netbsd || openbsd
+
+package pprof
+
+import (
+	"syscall"
+	"time"
+)
+
+func init() {
+	diffCPUTimeImpl = diffCPUTimeRUsage
+}
+
+func diffCPUTimeRUsage(f func()) time.Duration {
+	ok := true
+	var before, after syscall.Rusage
+
+	err := syscall.Getrusage(syscall.RUSAGE_SELF, &before)
+	if err != nil {
+		ok = false
+	}
+
+	f()
+
+	err = syscall.Getrusage(syscall.RUSAGE_SELF, &after)
+	if err != nil {
+		ok = false
+	}
+
+	if !ok {
+		return 0
+	}
+
+	return time.Duration((after.Utime.Nano() + after.Stime.Nano()) - (before.Utime.Nano() + before.Stime.Nano()))
+}
diff --git a/libgo/go/runtime/pprof/uname_linux_test.go b/libgo/go/runtime/pprof/uname_linux_test.go
new file mode 100644
index 0000000..8374c83
--- /dev/null
+++ b/libgo/go/runtime/pprof/uname_linux_test.go
@@ -0,0 +1,61 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build linux
+
+package pprof
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"syscall"
+)
+
+var versionRe = regexp.MustCompile(`^(\d+)(?:\.(\d+)(?:\.(\d+))).*$`)
+
+func linuxKernelVersion() (major, minor, patch int, err error) {
+	var uname syscall.Utsname
+	if err := syscall.Uname(&uname); err != nil {
+		return 0, 0, 0, err
+	}
+
+	buf := make([]byte, 0, len(uname.Release))
+	for _, b := range uname.Release {
+		if b == 0 {
+			break
+		}
+		buf = append(buf, byte(b))
+	}
+	rl := string(buf)
+
+	m := versionRe.FindStringSubmatch(rl)
+	if m == nil {
+		return 0, 0, 0, fmt.Errorf("error matching version number in %q", rl)
+	}
+
+	v, err := strconv.ParseInt(m[1], 10, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("error parsing major version %q in %s: %w", m[1], rl, err)
+	}
+	major = int(v)
+
+	if len(m) >= 3 {
+		v, err := strconv.ParseInt(m[2], 10, 64)
+		if err != nil {
+			return 0, 0, 0, fmt.Errorf("error parsing minor version %q in %s: %w", m[2], rl, err)
+		}
+		minor = int(v)
+	}
+
+	if len(m) >= 4 {
+		v, err := strconv.ParseInt(m[3], 10, 64)
+		if err != nil {
+			return 0, 0, 0, fmt.Errorf("error parsing patch version %q in %s: %w", m[3], rl, err)
+		}
+		patch = int(v)
+	}
+
+	return
+}
diff --git a/libgo/go/runtime/pprof/uname_other_test.go b/libgo/go/runtime/pprof/uname_other_test.go
new file mode 100644
index 0000000..3276407
--- /dev/null
+++ b/libgo/go/runtime/pprof/uname_other_test.go
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !linux
+
+package pprof
+
+import (
+	"errors"
+)
+
+func linuxKernelVersion() (major, minor, patch int, err error) {
+	return 0, 0, 0, errors.New("not running on linux")
+}
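The core technique behind the new TestCPUProfileMultithreadMagnitude test is to bracket a workload with two getrusage(RUSAGE_SELF) calls and compare the OS-reported CPU time against another CPU-time estimate (in the test, the sum of profiler samples). The standalone sketch below condenses that idea; helper names and the busy-loop workload are illustrative, not taken from the patch:

```go
package main

import (
	"fmt"
	"syscall"
	"time"
)

// cpuTimeOf returns the process CPU time (user + system) consumed while f
// runs, as reported by the OS via getrusage. Unix-only.
func cpuTimeOf(f func()) time.Duration {
	var before, after syscall.Rusage
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &before); err != nil {
		return 0
	}
	f()
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &after); err != nil {
		return 0
	}
	return time.Duration((after.Utime.Nano() + after.Stime.Nano()) -
		(before.Utime.Nano() + before.Stime.Nano()))
}

// withinFraction reports whether a and b differ by at most maxDiff, measured
// relative to the larger value — the same tolerance scheme the test applies
// (10%, or 40% with -short).
func withinFraction(a, b time.Duration, maxDiff float64) bool {
	if a <= 0 || b <= 0 {
		return false
	}
	if a < b {
		a, b = b, a
	}
	return float64(a-b)/float64(a) <= maxDiff
}

func main() {
	busy := func() { // burn some CPU so rusage has something to measure
		x := 0
		for i := 0; i < 1e8; i++ {
			x += i
		}
		_ = x
	}
	got := cpuTimeOf(busy)
	fmt.Println(got, withinFraction(got, got, 0.10))
}
```

Measuring with rusage rather than wall-clock time is what makes the parallel case meaningful: with several worker goroutines, CPU time should scale with the worker count even though elapsed time stays roughly constant.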