author    Ian Lance Taylor <ian@gcc.gnu.org>    2019-05-14 14:59:42 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>    2019-05-14 14:59:42 +0000
commit    1ac09ef2c611d3113665ec8c74e38b125217edb3 (patch)
tree      0bed1e11d205c99ef1f13dd4b7aece761779c360 /libgo/go/runtime
parent    ce9f305e44ff0353ee9e6cb07599240354ae9ed2 (diff)
libgo: reduce overhead for memory/block/mutex profiling
Revise the gccgo version of memory/block/mutex profiling to reduce runtime overhead. The main change is to collect raw stack traces while the profile is on line, then post-process the stacks just prior to the point where we are ready to use the final product. Memory profiling (at a very low sampling rate) is enabled by default, and the overhead of the symbolization / DWARF-reading from backtrace_full was slowing things down relative to the main Go runtime.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/171497

From-SVN: r271172
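
The two-phase approach described above can be illustrated outside the runtime with the exported Go APIs: capture raw PCs cheaply on the hot path, and defer all name/file/line resolution to reporting time. The sketch below is only an analogy using runtime.Callers and runtime.CallersFrames, not the libgo-internal code changed in this patch.

package main

import (
	"fmt"
	"runtime"
)

// capture records a raw, PC-only stack trace. No symbol or line lookup
// happens here, so the cost on the allocation/contention hot path stays small.
func capture() []uintptr {
	pcs := make([]uintptr, 32)
	n := runtime.Callers(2, pcs) // skip runtime.Callers and capture itself
	return pcs[:n]
}

// symbolize resolves function/file/line information only when the profile
// is actually consumed, mirroring the deferred post-processing step.
func symbolize(pcs []uintptr) {
	frames := runtime.CallersFrames(pcs)
	for {
		f, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
		if !more {
			break
		}
	}
}

func main() {
	stk := capture()  // cheap while profiling is running
	symbolize(stk)    // expensive DWARF/symbol work deferred to report time
}
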
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/heapdump.go          10
-rw-r--r--  libgo/go/runtime/mgcmark.go            2
-rw-r--r--  libgo/go/runtime/mprof.go            325
-rw-r--r--  libgo/go/runtime/panic.go              4
-rw-r--r--  libgo/go/runtime/string.go             4
-rw-r--r--  libgo/go/runtime/symtab.go             8
-rw-r--r--  libgo/go/runtime/traceback_gccgo.go   16
7 files changed, 284 insertions, 85 deletions
diff --git a/libgo/go/runtime/heapdump.go b/libgo/go/runtime/heapdump.go
index 3aa9e8a..b0506a8 100644
--- a/libgo/go/runtime/heapdump.go
+++ b/libgo/go/runtime/heapdump.go
@@ -437,17 +437,15 @@ func dumpmemstats() {
dumpint(uint64(memstats.numgc))
}
-func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *location, size, allocs, frees uintptr) {
- stk := (*[100000]location)(unsafe.Pointer(pstk))
+func dumpmemprof_callback(b *bucket, nstk uintptr, pstk *uintptr, size, allocs, frees uintptr) {
+ stk := (*[100000]uintptr)(unsafe.Pointer(pstk))
dumpint(tagMemProf)
dumpint(uint64(uintptr(unsafe.Pointer(b))))
dumpint(uint64(size))
dumpint(uint64(nstk))
for i := uintptr(0); i < nstk; i++ {
- pc := stk[i].pc
- fn := stk[i].function
- file := stk[i].filename
- line := stk[i].lineno
+ pc := stk[i]
+ fn, file, line, _ := funcfileline(pc, -1)
if fn == "" {
var buf [64]byte
n := len(buf)
diff --git a/libgo/go/runtime/mgcmark.go b/libgo/go/runtime/mgcmark.go
index dc5e797..1b8a7a3 100644
--- a/libgo/go/runtime/mgcmark.go
+++ b/libgo/go/runtime/mgcmark.go
@@ -1085,7 +1085,7 @@ func scanstackblockwithmap(pc, b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
span != nil && span.state != mSpanManual &&
(obj < span.base() || obj >= span.limit || span.state != mSpanInUse) {
print("runtime: found in object at *(", hex(b), "+", hex(i), ") = ", hex(obj), ", pc=", hex(pc), "\n")
- name, file, line := funcfileline(pc, -1)
+ name, file, line, _ := funcfileline(pc, -1)
print(name, "\n", file, ":", line, "\n")
//gcDumpObject("object", b, i)
throw("found bad pointer in Go stack (incorrect use of unsafe or cgo?)")
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
index ab97569..9238e2bb 100644
--- a/libgo/go/runtime/mprof.go
+++ b/libgo/go/runtime/mprof.go
@@ -24,6 +24,10 @@ const (
blockProfile
mutexProfile
+ // a profile bucket from one of the categories above whose stack
+ // trace has been fixed up / pruned.
+ prunedProfile
+
// size of bucket hash table
buckHashSize = 179999
@@ -138,11 +142,13 @@ type blockRecord struct {
}
var (
- mbuckets *bucket // memory profile buckets
- bbuckets *bucket // blocking profile buckets
- xbuckets *bucket // mutex profile buckets
- buckhash *[179999]*bucket
- bucketmem uintptr
+ mbuckets *bucket // memory profile buckets
+ bbuckets *bucket // blocking profile buckets
+ xbuckets *bucket // mutex profile buckets
+ sbuckets *bucket // pre-symbolization profile buckets (stacks fixed up)
+ freebuckets *bucket // freelist of unused fixed up profile buckets
+ buckhash *[179999]*bucket
+ bucketmem uintptr
mProf struct {
// All fields in mProf are protected by proflock.
@@ -158,12 +164,35 @@ var (
const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
+// payloadOffset() returns a pointer into the part of a bucket
+// containing the profile payload (skips past the bucket struct itself
+// and then the stack trace).
+func payloadOffset(typ bucketType, nstk uintptr) uintptr {
+ if typ == prunedProfile {
+ // To allow reuse of prunedProfile buckets between different
+ // collections, allocate them with the max stack size (the portion
+ // of the stack used will vary from trace to trace).
+ nstk = maxStack
+ }
+ return unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
+}
+
+func max(x, y uintptr) uintptr {
+ if x > y {
+ return x
+ }
+ return y
+}
+
// newBucket allocates a bucket with the given type and number of stack entries.
func newBucket(typ bucketType, nstk int) *bucket {
- size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(location{})
+ size := payloadOffset(typ, uintptr(nstk))
switch typ {
default:
throw("invalid profile bucket type")
+ case prunedProfile:
+ // stack-fixed buckets are large enough to accommodate any payload.
+ size += max(unsafe.Sizeof(memRecord{}), unsafe.Sizeof(blockRecord{}))
case memProfile:
size += unsafe.Sizeof(memRecord{})
case blockProfile, mutexProfile:
@@ -178,31 +207,29 @@ func newBucket(typ bucketType, nstk int) *bucket {
}
// stk returns the slice in b holding the stack.
-func (b *bucket) stk() []location {
- stk := (*[maxStack]location)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
+func (b *bucket) stk() []uintptr {
+ stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
return stk[:b.nstk:b.nstk]
}
// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
- if b.typ != memProfile {
+ if b.typ != memProfile && b.typ != prunedProfile {
throw("bad use of bucket.mp")
}
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
- return (*memRecord)(data)
+ return (*memRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk)))
}
// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
- if b.typ != blockProfile && b.typ != mutexProfile {
+ if b.typ != blockProfile && b.typ != mutexProfile && b.typ != prunedProfile {
throw("bad use of bucket.bp")
}
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(location{}))
- return (*blockRecord)(data)
+ return (*blockRecord)(add(unsafe.Pointer(b), payloadOffset(b.typ, b.nstk)))
}
// Return the bucket for stk[0:nstk], allocating new bucket if needed.
-func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket {
+func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
if buckhash == nil {
buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
if buckhash == nil {
@@ -212,8 +239,8 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
// Hash stack.
var h uintptr
- for _, loc := range stk {
- h += loc.pc
+ for _, pc := range stk {
+ h += pc
h += h << 10
h ^= h >> 6
}
@@ -249,6 +276,9 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
} else if typ == mutexProfile {
b.allnext = xbuckets
xbuckets = b
+ } else if typ == prunedProfile {
+ b.allnext = sbuckets
+ sbuckets = b
} else {
b.allnext = bbuckets
bbuckets = b
@@ -256,7 +286,7 @@ func stkbucket(typ bucketType, size uintptr, stk []location, alloc bool) *bucket
return b
}
-func eqslice(x, y []location) bool {
+func eqslice(x, y []uintptr) bool {
if len(x) != len(y) {
return false
}
@@ -338,8 +368,8 @@ func mProf_PostSweep() {
// Called by malloc to record a profiled block.
func mProf_Malloc(p unsafe.Pointer, size uintptr) {
- var stk [maxStack]location
- nstk := callers(4, stk[:])
+ var stk [maxStack]uintptr
+ nstk := callersRaw(1, stk[:])
lock(&proflock)
b := stkbucket(memProfile, size, stk[:nstk], true)
c := mProf.cycle
@@ -414,13 +444,13 @@ func blocksampled(cycles int64) bool {
func saveblockevent(cycles int64, skip int, which bucketType) {
gp := getg()
var nstk int
- var stk [maxStack]location
+ var stk [maxStack]uintptr
if gp.m.curg == nil || gp.m.curg == gp {
- nstk = callers(skip, stk[:])
+ nstk = callersRaw(skip, stk[:])
} else {
// FIXME: This should get a traceback of gp.m.curg.
// nstk = gcallers(gp.m.curg, skip, stk[:])
- nstk = callers(skip, stk[:])
+ nstk = callersRaw(skip, stk[:])
}
lock(&proflock)
b := stkbucket(which, 0, stk[:nstk], true)
@@ -521,6 +551,150 @@ func (r *MemProfileRecord) Stack() []uintptr {
return r.Stack0[0:]
}
+// reusebucket tries to pick a prunedProfile bucket off
+// the freebuckets list, returning it if one is available or nil
+// if the free list is empty.
+func reusebucket(nstk int) *bucket {
+ var b *bucket
+ if freebuckets != nil {
+ b = freebuckets
+ freebuckets = freebuckets.allnext
+ b.typ = prunedProfile
+ b.nstk = uintptr(nstk)
+ mp := b.mp()
+ // Hack: rely on the fact that memprofile records are
+ // larger than blockprofile records when clearing.
+ *mp = memRecord{}
+ }
+ return b
+}
+
+// freebucket appends the specified prunedProfile bucket
+// onto the free list, and removes references to it from the hash.
+func freebucket(tofree *bucket) *bucket {
+ // Thread this bucket into the free list.
+ ret := tofree.allnext
+ tofree.allnext = freebuckets
+ freebuckets = tofree
+
+ // Clean up the hash. The hash may point directly to this bucket...
+ i := int(tofree.hash % buckHashSize)
+ if buckhash[i] == tofree {
+ buckhash[i] = tofree.next
+ } else {
+ // ... or when this bucket was inserted by stkbucket, it may have been
+ // chained off some other unrelated bucket.
+ for b := buckhash[i]; b != nil; b = b.next {
+ if b.next == tofree {
+ b.next = tofree.next
+ break
+ }
+ }
+ }
+ return ret
+}
+
+// fixupStack takes a 'raw' stack trace (stack of PCs generated by
+// callersRaw) and performs pre-symbolization fixup on it, returning
+// the results in 'canonStack'. For each frame we look at the
+// file/func/line information, then use that info to decide whether to
+// include the frame in the final symbolized stack (removing frames
+// corresponding to 'morestack' routines, for example). We also expand
+// frames if the PC values to which they refer correponds to inlined
+// functions to allow for expanded symbolic info to be filled in
+// later. Note: there is code in go-callers.c's backtrace_full callback()
+// function that performs very similar fixups; these two code paths
+// should be kept in sync.
+func fixupStack(stk []uintptr, canonStack *[maxStack]uintptr, size uintptr) int {
+ var cidx int
+ var termTrace bool
+ for _, pc := range stk {
+ // Subtract 1 from PC to undo the 1 we added in callback in
+ // go-callers.c.
+ function, file, _, frames := funcfileline(pc-1, -1)
+
+ // Skip split-stack functions (match by function name)
+ skipFrame := false
+ if hasPrefix(function, "_____morestack_") || hasPrefix(function, "__morestack_") {
+ skipFrame = true
+ }
+
+ // Skip split-stack functions (match by file)
+ if hasSuffix(file, "/morestack.S") {
+ skipFrame = true
+ }
+
+ // Skip thunks and recover functions. There is no equivalent to
+ // these functions in the gc toolchain.
+ fcn := function
+ if hasSuffix(fcn, "..r") {
+ skipFrame = true
+ } else {
+ for fcn != "" && (fcn[len(fcn)-1] >= '0' && fcn[len(fcn)-1] <= '9') {
+ fcn = fcn[:len(fcn)-1]
+ }
+ if hasSuffix(fcn, "..stub") || hasSuffix(fcn, "..thunk") {
+ skipFrame = true
+ }
+ }
+ if skipFrame {
+ continue
+ }
+
+ // Terminate the trace if we encounter a frame corresponding to
+ // runtime.main, runtime.kickoff, makecontext, etc. See the
+ // corresponding code in go-callers.c, callback function used
+ // with backtrace_full.
+ if function == "makecontext" {
+ termTrace = true
+ }
+ if hasSuffix(file, "/proc.c") && function == "runtime_mstart" {
+ termTrace = true
+ }
+ if hasSuffix(file, "/proc.go") &&
+ (function == "runtime.main" || function == "runtime.kickoff") {
+ termTrace = true
+ }
+
+ // Expand inline frames.
+ for i := 0; i < frames; i++ {
+ (*canonStack)[cidx] = pc
+ cidx++
+ if cidx >= maxStack {
+ termTrace = true
+ break
+ }
+ }
+ if termTrace {
+ break
+ }
+ }
+ return cidx
+}
+
+// fixupBucket takes a raw memprofile bucket and creates a new bucket
+// in which the stack trace has been fixed up (inline frames expanded,
+// unwanted frames stripped out). Original bucket is left unmodified;
+ a new prunedProfile bucket may be generated as a side effect.
+// Payload information from the original bucket is incorporated into
+// the new bucket.
+func fixupBucket(b *bucket) {
+ var canonStack [maxStack]uintptr
+ frames := fixupStack(b.stk(), &canonStack, b.size)
+ cb := stkbucket(prunedProfile, b.size, canonStack[:frames], true)
+ switch b.typ {
+ default:
+ throw("invalid profile bucket type")
+ case memProfile:
+ rawrecord := b.mp()
+ cb.mp().active.add(&rawrecord.active)
+ case blockProfile, mutexProfile:
+ bpcount := b.bp().count
+ cb.bp().count += bpcount
+ cb.bp().cycles += bpcount
+ }
+}
+
// MemProfile returns a profile of memory allocated and freed per allocation
// site.
//
@@ -576,15 +750,31 @@ func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
}
}
if n <= len(p) {
- ok = true
- idx := 0
- for b := mbuckets; b != nil; b = b.allnext {
+ var bnext *bucket
+
+ // Post-process raw buckets to fix up their stack traces
+ for b := mbuckets; b != nil; b = bnext {
+ bnext = b.allnext
mp := b.mp()
if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
- record(&p[idx], b)
- idx++
+ fixupBucket(b)
}
}
+
+ // Record pruned/fixed-up buckets
+ ok = true
+ idx := 0
+ for b := sbuckets; b != nil; b = b.allnext {
+ record(&p[idx], b)
+ idx++
+ }
+ n = idx
+
+ // Free up pruned buckets for use in next round
+ for b := sbuckets; b != nil; b = bnext {
+ bnext = freebucket(b)
+ }
+ sbuckets = nil
}
unlock(&proflock)
return
@@ -597,18 +787,18 @@ func record(r *MemProfileRecord, b *bucket) {
r.FreeBytes = int64(mp.active.free_bytes)
r.AllocObjects = int64(mp.active.allocs)
r.FreeObjects = int64(mp.active.frees)
- for i, loc := range b.stk() {
+ for i, pc := range b.stk() {
if i >= len(r.Stack0) {
break
}
- r.Stack0[i] = loc.pc
+ r.Stack0[i] = pc
}
for i := int(b.nstk); i < len(r.Stack0); i++ {
r.Stack0[i] = 0
}
}
-func iterate_memprof(fn func(*bucket, uintptr, *location, uintptr, uintptr, uintptr)) {
+func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
lock(&proflock)
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
@@ -625,39 +815,59 @@ type BlockProfileRecord struct {
StackRecord
}
-// BlockProfile returns n, the number of records in the current blocking profile.
-// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
-// If len(p) < n, BlockProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.blockprofile flag instead
-// of calling BlockProfile directly.
-func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
- lock(&proflock)
- for b := bbuckets; b != nil; b = b.allnext {
+func harvestBlockMutexProfile(buckets *bucket, p []BlockProfileRecord) (n int, ok bool) {
+ for b := buckets; b != nil; b = b.allnext {
n++
}
if n <= len(p) {
+ var bnext *bucket
+
+ // Post-process raw buckets to create pruned/fixed-up buckets
+ for b := buckets; b != nil; b = bnext {
+ bnext = b.allnext
+ fixupBucket(b)
+ }
+
+ // Record
ok = true
- for b := bbuckets; b != nil; b = b.allnext {
+ for b := sbuckets; b != nil; b = b.allnext {
bp := b.bp()
r := &p[0]
r.Count = bp.count
r.Cycles = bp.cycles
i := 0
- var loc location
- for i, loc = range b.stk() {
+ var pc uintptr
+ for i, pc = range b.stk() {
if i >= len(r.Stack0) {
break
}
- r.Stack0[i] = loc.pc
+ r.Stack0[i] = pc
}
for ; i < len(r.Stack0); i++ {
r.Stack0[i] = 0
}
p = p[1:]
}
+
+ // Free up pruned buckets for use in next round.
+ for b := sbuckets; b != nil; b = bnext {
+ bnext = freebucket(b)
+ }
+ sbuckets = nil
}
+ return
+}
+
+// BlockProfile returns n, the number of records in the current blocking profile.
+// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
+// If len(p) < n, BlockProfile does not change p and returns n, false.
+//
+// Most clients should use the runtime/pprof package or
+// the testing package's -test.blockprofile flag instead
+// of calling BlockProfile directly.
+func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
+ lock(&proflock)
+ n, ok = harvestBlockMutexProfile(bbuckets, p)
unlock(&proflock)
return
}
@@ -670,30 +880,7 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
// instead of calling MutexProfile directly.
func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
lock(&proflock)
- for b := xbuckets; b != nil; b = b.allnext {
- n++
- }
- if n <= len(p) {
- ok = true
- for b := xbuckets; b != nil; b = b.allnext {
- bp := b.bp()
- r := &p[0]
- r.Count = int64(bp.count)
- r.Cycles = bp.cycles
- i := 0
- var loc location
- for i, loc = range b.stk() {
- if i >= len(r.Stack0) {
- break
- }
- r.Stack0[i] = loc.pc
- }
- for ; i < len(r.Stack0); i++ {
- r.Stack0[i] = 0
- }
- p = p[1:]
- }
- }
+ n, ok = harvestBlockMutexProfile(xbuckets, p)
unlock(&proflock)
return
}
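
For readers skimming the fixupStack addition above, the pruning rules boil down to a handful of name and file patterns. The helper below is a hypothetical, self-contained restatement of that filter using the standard strings package (inside the runtime that package is unavailable, which is why the patch adds local hasPrefix/hasSuffix helpers instead); it is a sketch, not the code the runtime runs.

package main

import "strings"

// shouldSkipFrame restates the frame filter from fixupStack: split-stack
// helpers and gccgo-specific thunk/recover functions are dropped from the
// profile stack before symbolization.
func shouldSkipFrame(function, file string) bool {
	// Split-stack functions, matched by function name or by source file.
	if strings.HasPrefix(function, "_____morestack_") ||
		strings.HasPrefix(function, "__morestack_") ||
		strings.HasSuffix(file, "/morestack.S") {
		return true
	}
	// Recover thunks carry a "..r" suffix.
	if strings.HasSuffix(function, "..r") {
		return true
	}
	// Stubs and thunks, allowing for a trailing numeric suffix
	// (e.g. "..thunk12").
	fcn := strings.TrimRight(function, "0123456789")
	return strings.HasSuffix(fcn, "..stub") || strings.HasSuffix(fcn, "..thunk")
}

func main() {
	// Example: a gccgo recover thunk is filtered out of the profile stack.
	println(shouldSkipFrame("main.f..r", "main.go")) // prints: true
}
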
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
index 9b8ffb9..264ad38 100644
--- a/libgo/go/runtime/panic.go
+++ b/libgo/go/runtime/panic.go
@@ -53,7 +53,7 @@ var indexError = error(errorString("index out of range"))
// entire runtime stack for easier debugging.
func panicindex() {
- name, _, _ := funcfileline(getcallerpc()-1, -1)
+ name, _, _, _ := funcfileline(getcallerpc()-1, -1)
if hasPrefix(name, "runtime.") {
throw(string(indexError.(errorString)))
}
@@ -64,7 +64,7 @@ func panicindex() {
var sliceError = error(errorString("slice bounds out of range"))
func panicslice() {
- name, _, _ := funcfileline(getcallerpc()-1, -1)
+ name, _, _, _ := funcfileline(getcallerpc()-1, -1)
if hasPrefix(name, "runtime.") {
throw(string(sliceError.(errorString)))
}
diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go
index 025ea7a..eac94bf 100644
--- a/libgo/go/runtime/string.go
+++ b/libgo/go/runtime/string.go
@@ -360,6 +360,10 @@ func hasPrefix(s, prefix string) bool {
return len(s) >= len(prefix) && s[:len(prefix)] == prefix
}
+func hasSuffix(s, suffix string) bool {
+ return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
+}
+
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go
index d7e8c18..8f3c843 100644
--- a/libgo/go/runtime/symtab.go
+++ b/libgo/go/runtime/symtab.go
@@ -79,7 +79,7 @@ func (ci *Frames) Next() (frame Frame, more bool) {
// Subtract 1 from PC to undo the 1 we added in callback in
// go-callers.c.
- function, file, line := funcfileline(pc-1, int32(i))
+ function, file, line, _ := funcfileline(pc-1, int32(i))
if function == "" && file == "" {
return Frame{}, more
}
@@ -158,7 +158,7 @@ const (
// a *Func describing the innermost function, but with an entry
// of the outermost function.
func FuncForPC(pc uintptr) *Func {
- name, _, _ := funcfileline(pc, -1)
+ name, _, _, _ := funcfileline(pc, -1)
if name == "" {
return nil
}
@@ -187,7 +187,7 @@ func (f *Func) Entry() uintptr {
// The result will not be accurate if pc is not a program
// counter within f.
func (f *Func) FileLine(pc uintptr) (file string, line int) {
- _, file, line = funcfileline(pc, -1)
+ _, file, line, _ = funcfileline(pc, -1)
return file, line
}
@@ -261,5 +261,5 @@ func demangleSymbol(s string) string {
}
// implemented in go-caller.c
-func funcfileline(uintptr, int32) (string, string, int)
+func funcfileline(uintptr, int32) (string, string, int, int)
func funcentry(uintptr) uintptr
diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go
index 7581798..b0eecf2 100644
--- a/libgo/go/runtime/traceback_gccgo.go
+++ b/libgo/go/runtime/traceback_gccgo.go
@@ -20,7 +20,7 @@ func printcreatedby(gp *g) {
if entry != 0 && tracepc > entry {
tracepc -= sys.PCQuantum
}
- function, file, line := funcfileline(tracepc, -1)
+ function, file, line, _ := funcfileline(tracepc, -1)
if function != "" && showframe(function, gp, false) && gp.goid != 1 {
printcreatedby1(function, file, line, entry, pc)
}
@@ -61,6 +61,16 @@ func callers(skip int, locbuf []location) int {
return int(n)
}
+//go:noescape
+//extern runtime_callersRaw
+func c_callersRaw(skip int32, pcs *uintptr, max int32) int32
+
+// callersRaw returns a raw (PCs only) stack trace of the current goroutine.
+func callersRaw(skip int, pcbuf []uintptr) int {
+ n := c_callersRaw(int32(skip)+1, &pcbuf[0], int32(len(pcbuf)))
+ return int(n)
+}
+
// traceback prints a traceback of the current goroutine.
// This differs from the gc version, which is given pc, sp, lr and g and
// can print a traceback of any goroutine.
@@ -83,7 +93,7 @@ func traceback(skip int32) {
func printAncestorTraceback(ancestor ancestorInfo) {
print("[originating from goroutine ", ancestor.goid, "]:\n")
for fidx, pc := range ancestor.pcs {
- function, file, line := funcfileline(pc, -1)
+ function, file, line, _ := funcfileline(pc, -1)
if showfuncinfo(function, fidx == 0) {
printAncestorTracebackFuncInfo(function, file, line, pc)
}
@@ -92,7 +102,7 @@ func printAncestorTraceback(ancestor ancestorInfo) {
print("...additional frames elided...\n")
}
// Show what created goroutine, except main goroutine (goid 1).
- function, file, line := funcfileline(ancestor.gopc, -1)
+ function, file, line, _ := funcfileline(ancestor.gopc, -1)
if function != "" && showfuncinfo(function, false) && ancestor.goid != 1 {
printcreatedby1(function, file, line, funcentry(ancestor.gopc), ancestor.gopc)
}