Diffstat (limited to 'libgo/go/runtime/mprof.go')
-rw-r--r--  libgo/go/runtime/mprof.go  94
1 file changed, 65 insertions(+), 29 deletions(-)
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
index afacf8f..9b11597 100644
--- a/libgo/go/runtime/mprof.go
+++ b/libgo/go/runtime/mprof.go
@@ -12,6 +12,10 @@ import (
"unsafe"
)
+// For gofrontend, use go:linkname for blockevent so that
+// runtime/pprof/pprof_test can call it.
+//go:linkname blockevent
+
// NOTE(rsc): Everything here could use cas if contention became an issue.
var proflock mutex
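The go:linkname directive added above only exposes the runtime-internal symbol; a caller still needs its own linkname pragma to bind to it. A minimal sketch of the consuming side, assuming a test file in package pprof that mirrors blockevent's signature (the local declaration below is illustrative, not part of this patch):

package pprof

import (
	_ "unsafe" // required for go:linkname
)

// Bind the body-less local declaration to the runtime symbol; the
// linker resolves the reference at link time.
//go:linkname blockevent runtime.blockevent
func blockevent(cycles int64, skip int)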
@@ -138,7 +142,7 @@ func (a *memRecordCycle) add(b *memRecordCycle) {
// A blockRecord is the bucket data for a bucket of type blockProfile,
// which is used in blocking and mutex profiles.
type blockRecord struct {
- count int64
+ count float64
cycles int64
}
@@ -431,20 +435,23 @@ func blockevent(cycles int64, skip int) {
if cycles <= 0 {
cycles = 1
}
- if blocksampled(cycles) {
- saveblockevent(cycles, skip+1, blockProfile)
+
+ rate := int64(atomic.Load64(&blockprofilerate))
+ if blocksampled(cycles, rate) {
+ saveblockevent(cycles, rate, skip+1, blockProfile)
}
}
-func blocksampled(cycles int64) bool {
- rate := int64(atomic.Load64(&blockprofilerate))
+// blocksampled returns true for all events where cycles >= rate. Shorter
+// events have a cycles/rate random chance of returning true.
+func blocksampled(cycles, rate int64) bool {
if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) {
return false
}
return true
}
-func saveblockevent(cycles int64, skip int, which bucketType) {
+func saveblockevent(cycles, rate int64, skip int, which bucketType) {
gp := getg()
var nstk int
var stk [maxStack]uintptr
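For readers who want to see the rule described in the blocksampled comment above in isolation, here is a self-contained sketch (outside the runtime, with math/rand standing in for fastrand; not part of this patch):

package main

import (
	"fmt"
	"math/rand"
)

// sampled mirrors the blocksampled rule: events at least as long as the
// rate are always kept; shorter events are kept with probability
// roughly cycles/rate.
func sampled(cycles, rate int64) bool {
	if rate <= 0 {
		return false
	}
	if cycles >= rate {
		return true
	}
	return rand.Int63n(rate) < cycles
}

func main() {
	const rate, cycles = 10000, 2500
	kept := 0
	for i := 0; i < 1000000; i++ {
		if sampled(cycles, rate) {
			kept++
		}
	}
	fmt.Printf("kept %.1f%% of events (expect ~%.1f%%)\n",
		100*float64(kept)/1e6, 100*float64(cycles)/float64(rate))
}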
@@ -457,8 +464,15 @@ func saveblockevent(cycles int64, skip int, which bucketType) {
}
lock(&proflock)
b := stkbucket(which, 0, skip, stk[:nstk], true)
- b.bp().count++
- b.bp().cycles += cycles
+
+ if which == blockProfile && cycles < rate {
+ // Remove sampling bias, see discussion on http://golang.org/cl/299991.
+ b.bp().count += float64(rate) / float64(cycles)
+ b.bp().cycles += rate
+ } else {
+ b.bp().count++
+ b.bp().cycles += cycles
+ }
unlock(&proflock)
}
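As a worked example of the correction above (a reading of the patch, not additional code in it): with blockprofilerate set to 10000 cycles and an event that blocked for 2500 cycles, blocksampled keeps the event with probability roughly 2500/10000 = 1/4. When it is kept, the bucket gains rate/cycles = 4 counts and rate = 10000 cycles, so the expected contribution per actual event is 1 count and 2500 cycles, the same as if every event had been recorded.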
@@ -489,7 +503,7 @@ func mutexevent(cycles int64, skip int) {
// TODO(pjw): measure impact of always calling fastrand vs using something
// like malloc.go:nextSample()
if rate > 0 && int64(fastrand())%rate == 0 {
- saveblockevent(cycles, skip+1, mutexProfile)
+ saveblockevent(cycles, rate, skip+1, mutexProfile)
}
}
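A minimal usage sketch (not part of this patch) of how the 1-in-rate sampling above is driven from user code through the public API:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// Record roughly one in five mutex contention events.
	runtime.SetMutexProfileFraction(5)

	// ... run code that contends on mutexes ...

	f, err := os.Create("mutex.prof")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Write the accumulated mutex profile.
	pprof.Lookup("mutex").WriteTo(f, 0)
}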
@@ -525,7 +539,22 @@ func (r *StackRecord) Stack() []uintptr {
// memory profiling rate should do so just once, as early as
// possible in the execution of the program (for example,
// at the beginning of main).
-var MemProfileRate int = 512 * 1024
+var MemProfileRate int = defaultMemProfileRate(512 * 1024)
+
+// defaultMemProfileRate returns 0 if disableMemoryProfiling is set.
+// It exists primarily for the godoc rendering of MemProfileRate
+// above.
+func defaultMemProfileRate(v int) int {
+ if disableMemoryProfiling {
+ return 0
+ }
+ return v
+}
+
+// disableMemoryProfiling is set by the linker if runtime.MemProfile
+// is not used and the link type guarantees nobody else could use it
+// elsewhere.
+var disableMemoryProfiling bool
// A MemProfileRecord describes the live objects allocated
// by a particular call sequence (stack trace).
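A minimal usage sketch (not part of this patch) of the MemProfileRate knob documented above; a program that changes the rate should do so before any profiled allocation, for example in an init function:

package main

import "runtime"

func init() {
	// Sample every allocation instead of the default of one sample
	// per 512 KiB allocated. Setting the rate to 0 disables memory
	// profiling entirely, which is also the value defaultMemProfileRate
	// returns when the linker sets disableMemoryProfiling.
	runtime.MemProfileRate = 1
}

func main() {
	// ... rest of the program ...
}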
@@ -722,9 +751,8 @@ func fixupBucket(b *bucket) {
rawrecord := b.mp()
cb.mp().active.add(&rawrecord.active)
case blockProfile, mutexProfile:
- bpcount := b.bp().count
- cb.bp().count += bpcount
- cb.bp().cycles += bpcount
+ cb.bp().count += b.bp().count
+ cb.bp().cycles += b.bp().cycles
}
}
@@ -866,7 +894,12 @@ func harvestBlockMutexProfile(buckets *bucket, p []BlockProfileRecord) (n int, o
for b := sbuckets; b != nil; b = b.allnext {
bp := b.bp()
r := &p[0]
- r.Count = bp.count
+ r.Count = int64(bp.count)
+ // Prevent callers from having to worry about division by zero errors.
+ // See discussion on http://golang.org/cl/299991.
+ if r.Count == 0 {
+ r.Count = 1
+ }
r.Cycles = bp.cycles
i := 0
var pc uintptr
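A hedged sketch (not part of this patch) of the kind of caller the clamp above protects: computing average cycles per event from the public API would divide by zero if a fractional count rounded down to 0.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	runtime.SetBlockProfileRate(1) // profile every blocking event

	// ... run code that blocks ...

	records := make([]runtime.BlockProfileRecord, 64)
	if n, ok := runtime.BlockProfile(records); ok {
		for _, r := range records[:n] {
			// Safe: Count is reported as at least 1.
			fmt.Printf("%d cycles over %d events (avg %d)\n",
				r.Cycles, r.Count, r.Cycles/r.Count)
		}
	}
}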
@@ -962,12 +995,13 @@ func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int
stopTheWorld("profile")
+ // World is stopped, no locking required.
n = 1
- for _, gp1 := range allgs {
+ forEachGRace(func(gp1 *g) {
if isOK(gp1) {
n++
}
- }
+ })
if n <= len(p) {
ok = true
@@ -984,21 +1018,23 @@ func goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int
}
// Save other goroutines.
- for _, gp1 := range allgs {
+ forEachGRace(func(gp1 *g) {
if isOK(gp1) {
- if len(r) == 0 {
- // Should be impossible, but better to return a
- // truncated profile than to crash the entire process.
- break
- }
- saveg(gp1, &r[0])
- if labels != nil {
- lbl[0] = gp1.labels
- lbl = lbl[1:]
- }
- r = r[1:]
+ return
}
- }
+
+ if len(r) == 0 {
+ // Should be impossible, but better to return a
+ // truncated profile than to crash the entire process.
+ return
+ }
+ saveg(gp1, &r[0])
+ if labels != nil {
+ lbl[0] = gp1.labels
+ lbl = lbl[1:]
+ }
+ r = r[1:]
+ })
}
startTheWorld()
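Finally, a minimal usage sketch (not part of this patch) of the public entry point that reaches goroutineProfileWithLabels, growing the slice until every goroutine fits:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	p := make([]runtime.StackRecord, runtime.NumGoroutine()+8)
	for {
		n, ok := runtime.GoroutineProfile(p)
		if ok {
			p = p[:n]
			break
		}
		// Too small: more goroutines appeared, retry with a larger slice.
		p = make([]runtime.StackRecord, 2*len(p))
	}
	fmt.Println("captured", len(p), "goroutine stacks")
}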