diff options
author | Ian Lance Taylor <ian@gcc.gnu.org> | 2018-10-25 22:18:08 +0000 |
---|---|---|
committer | Ian Lance Taylor <ian@gcc.gnu.org> | 2018-10-25 22:18:08 +0000 |
commit | 34489eb2af3bbb7be101bc838615cf4a4dc6828d (patch) | |
tree | 0dbda78980d4553fdaeee92ca666d72a2ab95213 /libgo/go/runtime | |
parent | fc756f9f460d5f0ec73a72128645fdb39fec77a0 (diff) | |
download | gcc-34489eb2af3bbb7be101bc838615cf4a4dc6828d.zip gcc-34489eb2af3bbb7be101bc838615cf4a4dc6828d.tar.gz gcc-34489eb2af3bbb7be101bc838615cf4a4dc6828d.tar.bz2 |
compiler: improve name mangling for pkgpaths
The current implementation of Gogo::pkgpath_for_symbol was written in
a way that allowed two distinct package paths to map to the same
symbol, which could cause collisions at link-time or compile-time.
Switch to a better mangling scheme to ensure that we get a unique
packagepath symbol for each package. In the new scheme instead of having
separate mangling schemes for identifiers and package paths, the
main identifier mangler ("go_encode_id") now handles mangling of
both packagepath characters and identifier characters.
The new mangling scheme is more intrusive: "foo/bar.Baz" is mangled as
"foo..z2fbar.Baz" instead of "foo_bar.Baz". To mitigate this, this
patch also adds a demangling capability so that function names
returned from runtime.CallersFrames are converted back to their
original unmangled form.
Changing the pkgpath_for_symbol scheme requires updating a number of
//go:linkname directives and C "__asm__" directives to match the new
scheme, as well as updating the 'gotest' driver (which makes
assumptions about the correct mapping from pkgpath symbol to package
name).
Fixes golang/go#27534.
Reviewed-on: https://go-review.googlesource.com/c/135455
From-SVN: r265510
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r-- | libgo/go/runtime/atomic_pointer.go | 12 | ||||
-rw-r--r-- | libgo/go/runtime/cpuprof.go | 4 | ||||
-rw-r--r-- | libgo/go/runtime/debug/stack_test.go | 4 | ||||
-rw-r--r-- | libgo/go/runtime/heapdump.go | 2 | ||||
-rw-r--r-- | libgo/go/runtime/internal/atomic/atomic.c | 46 | ||||
-rw-r--r-- | libgo/go/runtime/mgc.go | 2 | ||||
-rw-r--r-- | libgo/go/runtime/mheap.go | 2 | ||||
-rw-r--r-- | libgo/go/runtime/mstats.go | 2 | ||||
-rw-r--r-- | libgo/go/runtime/net_plan9.go | 4 | ||||
-rw-r--r-- | libgo/go/runtime/netpoll.go | 18 | ||||
-rw-r--r-- | libgo/go/runtime/pprof/mprof_test.go | 6 | ||||
-rw-r--r-- | libgo/go/runtime/proc.go | 6 | ||||
-rw-r--r-- | libgo/go/runtime/proflabel.go | 4 | ||||
-rw-r--r-- | libgo/go/runtime/rdebug.go | 4 | ||||
-rw-r--r-- | libgo/go/runtime/runtime1.go | 2 | ||||
-rw-r--r-- | libgo/go/runtime/sema.go | 4 | ||||
-rw-r--r-- | libgo/go/runtime/sigqueue.go | 12 | ||||
-rw-r--r-- | libgo/go/runtime/symtab.go | 74 | ||||
-rw-r--r-- | libgo/go/runtime/time.go | 2 | ||||
-rw-r--r-- | libgo/go/runtime/trace.go | 8 | ||||
-rw-r--r-- | libgo/go/runtime/traceback_gccgo.go | 7 |
21 files changed, 152 insertions, 73 deletions
diff --git a/libgo/go/runtime/atomic_pointer.go b/libgo/go/runtime/atomic_pointer.go index 2d023d3..03d8d6a 100644 --- a/libgo/go/runtime/atomic_pointer.go +++ b/libgo/go/runtime/atomic_pointer.go @@ -52,10 +52,10 @@ func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { // We cannot just call the runtime routines, because the race detector expects // to be able to intercept the sync/atomic forms but not the runtime forms. -//go:linkname sync_atomic_StoreUintptr sync_atomic.StoreUintptr +//go:linkname sync_atomic_StoreUintptr sync..z2fatomic.StoreUintptr func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr) -//go:linkname sync_atomic_StorePointer sync_atomic.StorePointer +//go:linkname sync_atomic_StorePointer sync..z2fatomic.StorePointer //go:nosplit func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { if writeBarrier.enabled { @@ -64,10 +64,10 @@ func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new)) } -//go:linkname sync_atomic_SwapUintptr sync_atomic.SwapUintptr +//go:linkname sync_atomic_SwapUintptr sync..z2fatomic.SwapUintptr func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr -//go:linkname sync_atomic_SwapPointer sync_atomic.SwapPointer +//go:linkname sync_atomic_SwapPointer sync..z2fatomic.SwapPointer //go:nosplit func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer { if writeBarrier.enabled { @@ -77,10 +77,10 @@ func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Poi return old } -//go:linkname sync_atomic_CompareAndSwapUintptr sync_atomic.CompareAndSwapUintptr +//go:linkname sync_atomic_CompareAndSwapUintptr sync..z2fatomic.CompareAndSwapUintptr func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool -//go:linkname sync_atomic_CompareAndSwapPointer sync_atomic.CompareAndSwapPointer +//go:linkname sync_atomic_CompareAndSwapPointer 
sync..z2fatomic.CompareAndSwapPointer //go:nosplit func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { if writeBarrier.enabled { diff --git a/libgo/go/runtime/cpuprof.go b/libgo/go/runtime/cpuprof.go index b1a7c3b..e7cf1b4 100644 --- a/libgo/go/runtime/cpuprof.go +++ b/libgo/go/runtime/cpuprof.go @@ -186,7 +186,7 @@ func CPUProfile() []byte { panic("CPUProfile no longer available") } -//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime_pprof.runtime_cyclesPerSecond +//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime..z2fpprof.runtime_cyclesPerSecond func runtime_pprof_runtime_cyclesPerSecond() int64 { return tickspersecond() } @@ -197,7 +197,7 @@ func runtime_pprof_runtime_cyclesPerSecond() int64 { // on has been returned, readProfile returns eof=true. // The caller must save the returned data and tags before calling readProfile again. // -//go:linkname runtime_pprof_readProfile runtime_pprof.readProfile +//go:linkname runtime_pprof_readProfile runtime..z2fpprof.readProfile func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) { lock(&cpuprof.lock) log := cpuprof.log diff --git a/libgo/go/runtime/debug/stack_test.go b/libgo/go/runtime/debug/stack_test.go index 67931d1..309eddd 100644 --- a/libgo/go/runtime/debug/stack_test.go +++ b/libgo/go/runtime/debug/stack_test.go @@ -51,10 +51,10 @@ func TestStack(t *testing.T) { n++ } n++ - frame("stack.go", "runtime_debug.Stack") + frame("stack.go", "debug.Stack") frame("stack_test.go", "ptrmethod") frame("stack_test.go", "method") - frame("stack_test.go", "runtime_debug_test.TestStack") + frame("stack_test.go", "test.TestStack") frame("testing.go", "") } diff --git a/libgo/go/runtime/heapdump.go b/libgo/go/runtime/heapdump.go index e92ea39..5ebebf6 100644 --- a/libgo/go/runtime/heapdump.go +++ b/libgo/go/runtime/heapdump.go @@ -16,7 +16,7 @@ import ( "unsafe" ) -//go:linkname runtime_debug_WriteHeapDump runtime_debug.WriteHeapDump +//go:linkname 
runtime_debug_WriteHeapDump runtime..z2fdebug.WriteHeapDump func runtime_debug_WriteHeapDump(fd uintptr) { stopTheWorld("write heap dump") diff --git a/libgo/go/runtime/internal/atomic/atomic.c b/libgo/go/runtime/internal/atomic/atomic.c index 24820f2..b87fae9 100644 --- a/libgo/go/runtime/internal/atomic/atomic.c +++ b/libgo/go/runtime/internal/atomic/atomic.c @@ -7,7 +7,7 @@ #include "runtime.h" uint32_t Load (uint32_t *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Load") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load") __attribute__ ((no_split_stack)); uint32_t @@ -17,7 +17,7 @@ Load (uint32_t *ptr) } void *Loadp (void *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Loadp") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loadp") __attribute__ ((no_split_stack)); void * @@ -27,7 +27,7 @@ Loadp (void *ptr) } uint64_t Load64 (uint64_t *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Load64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load64") __attribute__ ((no_split_stack)); uint64_t @@ -39,7 +39,7 @@ Load64 (uint64_t *ptr) } uintptr_t Loaduintptr (uintptr_t *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Loaduintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loaduintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -49,7 +49,7 @@ Loaduintptr (uintptr_t *ptr) } uintgo Loaduint (uintgo *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Loaduint") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loaduint") __attribute__ ((no_split_stack)); uintgo @@ -59,7 +59,7 @@ Loaduint (uintgo *ptr) } int64_t Loadint64 (int64_t *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Loadint64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loadint64") __attribute__ ((no_split_stack)); int64_t @@ -71,7 +71,7 @@ Loadint64 (int64_t *ptr) } uint32_t Xadd (uint32_t *ptr, int32_t delta) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xadd") + __asm__ (GOSYM_PREFIX 
"runtime..z2finternal..z2fatomic.Xadd") __attribute__ ((no_split_stack)); uint32_t @@ -81,7 +81,7 @@ Xadd (uint32_t *ptr, int32_t delta) } uint64_t Xadd64 (uint64_t *ptr, int64_t delta) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xadd64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xadd64") __attribute__ ((no_split_stack)); uint64_t @@ -93,7 +93,7 @@ Xadd64 (uint64_t *ptr, int64_t delta) } uintptr_t Xadduintptr (uintptr_t *ptr, uintptr_t delta) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xadduintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xadduintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -103,7 +103,7 @@ Xadduintptr (uintptr_t *ptr, uintptr_t delta) } int64_t Xaddint64 (int64_t *ptr, int64_t delta) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xaddint64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xaddint64") __attribute__ ((no_split_stack)); int64_t @@ -115,7 +115,7 @@ Xaddint64 (int64_t *ptr, int64_t delta) } uint32_t Xchg (uint32_t *ptr, uint32_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xchg") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchg") __attribute__ ((no_split_stack)); uint32_t @@ -125,7 +125,7 @@ Xchg (uint32_t *ptr, uint32_t new) } uint64_t Xchg64 (uint64_t *ptr, uint64_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xchg64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchg64") __attribute__ ((no_split_stack)); uint64_t @@ -137,7 +137,7 @@ Xchg64 (uint64_t *ptr, uint64_t new) } uintptr_t Xchguintptr (uintptr_t *ptr, uintptr_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xchguintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchguintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -147,7 +147,7 @@ Xchguintptr (uintptr_t *ptr, uintptr_t new) } void And8 (uint8_t *ptr, uint8_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.And8") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.And8") 
__attribute__ ((no_split_stack)); void @@ -157,7 +157,7 @@ And8 (uint8_t *ptr, uint8_t val) } void Or8 (uint8_t *ptr, uint8_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Or8") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Or8") __attribute__ ((no_split_stack)); void @@ -167,7 +167,7 @@ Or8 (uint8_t *ptr, uint8_t val) } _Bool Cas (uint32_t *ptr, uint32_t old, uint32_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Cas") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Cas") __attribute__ ((no_split_stack)); _Bool @@ -177,7 +177,7 @@ Cas (uint32_t *ptr, uint32_t old, uint32_t new) } _Bool Cas64 (uint64_t *ptr, uint64_t old, uint64_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Cas64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Cas64") __attribute__ ((no_split_stack)); _Bool @@ -189,7 +189,7 @@ Cas64 (uint64_t *ptr, uint64_t old, uint64_t new) } _Bool Casp1 (void **ptr, void *old, void *new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Casp1") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Casp1") __attribute__ ((no_split_stack)); _Bool @@ -199,7 +199,7 @@ Casp1 (void **ptr, void *old, void *new) } _Bool Casuintptr (uintptr_t *ptr, uintptr_t old, uintptr_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Casuintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Casuintptr") __attribute__ ((no_split_stack)); _Bool @@ -209,7 +209,7 @@ Casuintptr (uintptr_t *ptr, uintptr_t old, uintptr_t new) } void Store (uint32_t *ptr, uint32_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Store") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Store") __attribute__ ((no_split_stack)); void @@ -219,7 +219,7 @@ Store (uint32_t *ptr, uint32_t val) } void Store64 (uint64_t *ptr, uint64_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Store64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Store64") __attribute__ ((no_split_stack)); void @@ -231,7 +231,7 @@ Store64 
(uint64_t *ptr, uint64_t val) } void Storeuintptr (uintptr_t *ptr, uintptr_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Storeuintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Storeuintptr") __attribute__ ((no_split_stack)); void @@ -241,7 +241,7 @@ Storeuintptr (uintptr_t *ptr, uintptr_t val) } void StorepNoWB (void *ptr, void *val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.StorepNoWB") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.StorepNoWB") __attribute__ ((no_split_stack)); void diff --git a/libgo/go/runtime/mgc.go b/libgo/go/runtime/mgc.go index 4ef982d..de84084 100644 --- a/libgo/go/runtime/mgc.go +++ b/libgo/go/runtime/mgc.go @@ -219,7 +219,7 @@ func gcenable() { memstats.enablegc = true // now that runtime is initialized, GC is okay } -//go:linkname setGCPercent runtime_debug.setGCPercent +//go:linkname setGCPercent runtime..z2fdebug.setGCPercent func setGCPercent(in int32) (out int32) { lock(&mheap_.lock) out = gcpercent diff --git a/libgo/go/runtime/mheap.go b/libgo/go/runtime/mheap.go index 65622f4..eb98083 100644 --- a/libgo/go/runtime/mheap.go +++ b/libgo/go/runtime/mheap.go @@ -1165,7 +1165,7 @@ func (h *mheap) scavenge(k int32, now, limit uint64) { } } -//go:linkname runtime_debug_freeOSMemory runtime_debug.freeOSMemory +//go:linkname runtime_debug_freeOSMemory runtime..z2fdebug.freeOSMemory func runtime_debug_freeOSMemory() { GC() systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) }) diff --git a/libgo/go/runtime/mstats.go b/libgo/go/runtime/mstats.go index f54ce9d..8aac850 100644 --- a/libgo/go/runtime/mstats.go +++ b/libgo/go/runtime/mstats.go @@ -477,7 +477,7 @@ func readmemstats_m(stats *MemStats) { stats.StackSys += stats.StackInuse } -//go:linkname readGCStats runtime_debug.readGCStats +//go:linkname readGCStats runtime..z2fdebug.readGCStats func readGCStats(pauses *[]uint64) { systemstack(func() { readGCStats_m(pauses) diff --git a/libgo/go/runtime/net_plan9.go b/libgo/go/runtime/net_plan9.go 
index 77ae8c6..907c319 100644 --- a/libgo/go/runtime/net_plan9.go +++ b/libgo/go/runtime/net_plan9.go @@ -8,12 +8,12 @@ import ( _ "unsafe" ) -//go:linkname runtime_ignoreHangup internal_poll.runtime_ignoreHangup +//go:linkname runtime_ignoreHangup internal..z2fpoll.runtime_ignoreHangup func runtime_ignoreHangup() { getg().m.ignoreHangup = true } -//go:linkname runtime_unignoreHangup internal_poll.runtime_unignoreHangup +//go:linkname runtime_unignoreHangup internal..z2fpoll.runtime_unignoreHangup func runtime_unignoreHangup(sig string) { getg().m.ignoreHangup = false } diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go index ab3d14d..6016b7d 100644 --- a/libgo/go/runtime/netpoll.go +++ b/libgo/go/runtime/netpoll.go @@ -85,7 +85,7 @@ var ( netpollWaiters uint32 ) -//go:linkname poll_runtime_pollServerInit internal_poll.runtime_pollServerInit +//go:linkname poll_runtime_pollServerInit internal..z2fpoll.runtime_pollServerInit func poll_runtime_pollServerInit() { netpollinit() atomic.Store(&netpollInited, 1) @@ -95,7 +95,7 @@ func netpollinited() bool { return atomic.Load(&netpollInited) != 0 } -//go:linkname poll_runtime_pollServerDescriptor internal_poll.runtime_pollServerDescriptor +//go:linkname poll_runtime_pollServerDescriptor internal..z2fpoll.runtime_pollServerDescriptor // poll_runtime_pollServerDescriptor returns the descriptor being used, // or ^uintptr(0) if the system does not use a poll descriptor. 
@@ -103,7 +103,7 @@ func poll_runtime_pollServerDescriptor() uintptr { return netpolldescriptor() } -//go:linkname poll_runtime_pollOpen internal_poll.runtime_pollOpen +//go:linkname poll_runtime_pollOpen internal..z2fpoll.runtime_pollOpen func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) { pd := pollcache.alloc() lock(&pd.lock) @@ -127,7 +127,7 @@ func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) { return pd, int(errno) } -//go:linkname poll_runtime_pollClose internal_poll.runtime_pollClose +//go:linkname poll_runtime_pollClose internal..z2fpoll.runtime_pollClose func poll_runtime_pollClose(pd *pollDesc) { if !pd.closing { throw("runtime: close polldesc w/o unblock") @@ -149,7 +149,7 @@ func (c *pollCache) free(pd *pollDesc) { unlock(&c.lock) } -//go:linkname poll_runtime_pollReset internal_poll.runtime_pollReset +//go:linkname poll_runtime_pollReset internal..z2fpoll.runtime_pollReset func poll_runtime_pollReset(pd *pollDesc, mode int) int { err := netpollcheckerr(pd, int32(mode)) if err != 0 { @@ -163,7 +163,7 @@ func poll_runtime_pollReset(pd *pollDesc, mode int) int { return 0 } -//go:linkname poll_runtime_pollWait internal_poll.runtime_pollWait +//go:linkname poll_runtime_pollWait internal..z2fpoll.runtime_pollWait func poll_runtime_pollWait(pd *pollDesc, mode int) int { err := netpollcheckerr(pd, int32(mode)) if err != 0 { @@ -185,7 +185,7 @@ func poll_runtime_pollWait(pd *pollDesc, mode int) int { return 0 } -//go:linkname poll_runtime_pollWaitCanceled internal_poll.runtime_pollWaitCanceled +//go:linkname poll_runtime_pollWaitCanceled internal..z2fpoll.runtime_pollWaitCanceled func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) { // This function is used only on windows after a failed attempt to cancel // a pending async IO operation. Wait for ioready, ignore closing or timeouts. 
@@ -193,7 +193,7 @@ func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) { } } -//go:linkname poll_runtime_pollSetDeadline internal_poll.runtime_pollSetDeadline +//go:linkname poll_runtime_pollSetDeadline internal..z2fpoll.runtime_pollSetDeadline func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) { lock(&pd.lock) if pd.closing { @@ -263,7 +263,7 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) { } } -//go:linkname poll_runtime_pollUnblock internal_poll.runtime_pollUnblock +//go:linkname poll_runtime_pollUnblock internal..z2fpoll.runtime_pollUnblock func poll_runtime_pollUnblock(pd *pollDesc) { lock(&pd.lock) if pd.closing { diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go index 5d77a1d..f428827 100644 --- a/libgo/go/runtime/pprof/mprof_test.go +++ b/libgo/go/runtime/pprof/mprof_test.go @@ -87,19 +87,19 @@ func TestMemoryProfiler(t *testing.T) { fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f x]+ # 0x[0-9,a-f]+ pprof\.allocatePersistent1K\+0x[0-9,a-f]+ .*/mprof_test\.go:40 -# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:74 +# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:74 `, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun), fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f x]+ # 0x[0-9,a-f]+ pprof\.allocateTransient1M\+0x[0-9,a-f]+ .*/mprof_test.go:21 -# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:72 +# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:72 `, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun), // This should start with "0: 0" but gccgo's imprecise // GC means that sometimes the value is not collected. 
fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+ # 0x[0-9,a-f]+ pprof\.allocateTransient2M\+0x[0-9,a-f]+ .*/mprof_test.go:27 -# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:73 +# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:73 `, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun), // This should start with "0: 0" but gccgo's imprecise diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go index 77d379b..bb16924 100644 --- a/libgo/go/runtime/proc.go +++ b/libgo/go/runtime/proc.go @@ -4670,7 +4670,7 @@ func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { return gp } -//go:linkname setMaxThreads runtime_debug.setMaxThreads +//go:linkname setMaxThreads runtime..z2fdebug.setMaxThreads func setMaxThreads(in int) (out int) { lock(&sched.lock) out = int(sched.maxmcount) @@ -4716,13 +4716,13 @@ func sync_runtime_procUnpin() { procUnpin() } -//go:linkname sync_atomic_runtime_procPin sync_atomic.runtime_procPin +//go:linkname sync_atomic_runtime_procPin sync..z2fatomic.runtime_procPin //go:nosplit func sync_atomic_runtime_procPin() int { return procPin() } -//go:linkname sync_atomic_runtime_procUnpin sync_atomic.runtime_procUnpin +//go:linkname sync_atomic_runtime_procUnpin sync..z2fatomic.runtime_procUnpin //go:nosplit func sync_atomic_runtime_procUnpin() { procUnpin() diff --git a/libgo/go/runtime/proflabel.go b/libgo/go/runtime/proflabel.go index ff73fe4..fc655cc 100644 --- a/libgo/go/runtime/proflabel.go +++ b/libgo/go/runtime/proflabel.go @@ -8,7 +8,7 @@ import "unsafe" var labelSync uintptr -//go:linkname runtime_setProfLabel runtime_pprof.runtime_setProfLabel +//go:linkname runtime_setProfLabel runtime..z2fpprof.runtime_setProfLabel func runtime_setProfLabel(labels unsafe.Pointer) { // Introduce race edge for read-back via profile. 
// This would more properly use &getg().labels as the sync address, @@ -34,7 +34,7 @@ func runtime_setProfLabel(labels unsafe.Pointer) { getg().labels = labels } -//go:linkname runtime_getProfLabel runtime_pprof.runtime_getProfLabel +//go:linkname runtime_getProfLabel runtime..z2fpprof.runtime_getProfLabel func runtime_getProfLabel() unsafe.Pointer { return getg().labels } diff --git a/libgo/go/runtime/rdebug.go b/libgo/go/runtime/rdebug.go index 76535a9..358df11 100644 --- a/libgo/go/runtime/rdebug.go +++ b/libgo/go/runtime/rdebug.go @@ -11,14 +11,14 @@ import _ "unsafe" // for go:linkname // maxstacksize. var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real -//go:linkname setMaxStack runtime_debug.setMaxStack +//go:linkname setMaxStack runtime..z2fdebug.setMaxStack func setMaxStack(in int) (out int) { out = int(maxstacksize) maxstacksize = uintptr(in) return out } -//go:linkname setPanicOnFault runtime_debug.setPanicOnFault +//go:linkname setPanicOnFault runtime..z2fdebug.setPanicOnFault func setPanicOnFault(new bool) (old bool) { _g_ := getg() old = _g_.paniconfault diff --git a/libgo/go/runtime/runtime1.go b/libgo/go/runtime/runtime1.go index 8b1b0a0..050f180 100644 --- a/libgo/go/runtime/runtime1.go +++ b/libgo/go/runtime/runtime1.go @@ -413,7 +413,7 @@ func parsedebugvars() { traceback_env = traceback_cache } -//go:linkname setTraceback runtime_debug.SetTraceback +//go:linkname setTraceback runtime..z2fdebug.SetTraceback func setTraceback(level string) { var t uint32 switch level { diff --git a/libgo/go/runtime/sema.go b/libgo/go/runtime/sema.go index cb7d3cd..273e8aa 100644 --- a/libgo/go/runtime/sema.go +++ b/libgo/go/runtime/sema.go @@ -56,7 +56,7 @@ func sync_runtime_Semacquire(addr *uint32) { semacquire1(addr, false, semaBlockProfile) } -//go:linkname poll_runtime_Semacquire internal_poll.runtime_Semacquire +//go:linkname poll_runtime_Semacquire internal..z2fpoll.runtime_Semacquire func poll_runtime_Semacquire(addr *uint32) { 
semacquire1(addr, false, semaBlockProfile) } @@ -71,7 +71,7 @@ func sync_runtime_SemacquireMutex(addr *uint32, lifo bool) { semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile) } -//go:linkname poll_runtime_Semrelease internal_poll.runtime_Semrelease +//go:linkname poll_runtime_Semrelease internal..z2fpoll.runtime_Semrelease func poll_runtime_Semrelease(addr *uint32) { semrelease(addr) } diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go index cf926a9..1a29b20 100644 --- a/libgo/go/runtime/sigqueue.go +++ b/libgo/go/runtime/sigqueue.go @@ -117,7 +117,7 @@ Send: // Called to receive the next queued signal. // Must only be called from a single goroutine at a time. -//go:linkname signal_recv os_signal.signal_recv +//go:linkname signal_recv os..z2fsignal.signal_recv func signal_recv() uint32 { for { // Serve any signals from local copy. @@ -161,7 +161,7 @@ func signal_recv() uint32 { // the signal(s) in question, and here we are just waiting to make sure // that all the signals have been delivered to the user channels // by the os/signal package. -//go:linkname signalWaitUntilIdle os_signal.signalWaitUntilIdle +//go:linkname signalWaitUntilIdle os..z2fsignal.signalWaitUntilIdle func signalWaitUntilIdle() { // Although the signals we care about have been removed from // sig.wanted, it is possible that another thread has received @@ -181,7 +181,7 @@ func signalWaitUntilIdle() { } // Must only be called from a single goroutine at a time. -//go:linkname signal_enable os_signal.signal_enable +//go:linkname signal_enable os..z2fsignal.signal_enable func signal_enable(s uint32) { if !sig.inuse { // The first call to signal_enable is for us @@ -208,7 +208,7 @@ func signal_enable(s uint32) { } // Must only be called from a single goroutine at a time. 
-//go:linkname signal_disable os_signal.signal_disable +//go:linkname signal_disable os..z2fsignal.signal_disable func signal_disable(s uint32) { if s >= uint32(len(sig.wanted)*32) { return @@ -221,7 +221,7 @@ func signal_disable(s uint32) { } // Must only be called from a single goroutine at a time. -//go:linkname signal_ignore os_signal.signal_ignore +//go:linkname signal_ignore os..z2fsignal.signal_ignore func signal_ignore(s uint32) { if s >= uint32(len(sig.wanted)*32) { return @@ -248,7 +248,7 @@ func sigInitIgnored(s uint32) { } // Checked by signal handlers. -//go:linkname signal_ignored os_signal.signal_ignored +//go:linkname signal_ignored os..z2fsignal.signal_ignored func signal_ignored(s uint32) bool { i := atomic.Load(&sig.ignored[s/32]) return i&(1<<(s&31)) != 0 diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go index 861921c..d137122 100644 --- a/libgo/go/runtime/symtab.go +++ b/libgo/go/runtime/symtab.go @@ -83,6 +83,11 @@ func (ci *Frames) Next() (frame Frame, more bool) { if function == "" && file == "" { return Frame{}, more } + + // Demangle function name if needed. + function = demangleSymbol(function) + + // Create entry. entry := funcentry(pc - 1) f := &Func{name: function, entry: entry} @@ -182,6 +187,75 @@ func (f *Func) FileLine(pc uintptr) (file string, line int) { return file, line } +func hexval(b byte) uint { + if b >= '0' && b <= '9' { + return uint(b - '0') + } + if b >= 'a' && b <= 'f' { + return uint(b-'a') + 10 + } + return 0 +} + +func hexDigitsToRune(digits []byte, ndig int) rune { + result := uint(0) + for i := 0; i < ndig; i++ { + result <<= uint(4) + result |= hexval(digits[i]) + } + return rune(result) +} + +// Perform an in-place decoding on the input byte slice. This looks +// for "..z<hex 2 >", "..u<hex x 4>" and "..U<hex x 8>" and overwrites +// with the encoded bytes corresponding to the unicode in question. +// Return value is the number of bytes taken by the result. 
+ +func decodeIdentifier(bsl []byte) int { + j := 0 + for i := 0; i < len(bsl); i++ { + b := bsl[i] + + if i+1 < len(bsl) && bsl[i] == '.' && bsl[i+1] == '.' { + if i+4 < len(bsl) && bsl[i+2] == 'z' { + digits := bsl[i+3:] + r := hexDigitsToRune(digits, 2) + nc := encoderune(bsl[j:], r) + j += nc + i += 4 + continue + } else if i+6 < len(bsl) && bsl[i+2] == 'u' { + digits := bsl[i+3:] + r := hexDigitsToRune(digits, 4) + nc := encoderune(bsl[j:], r) + j += nc + i += 6 + continue + } else if i+10 < len(bsl) && bsl[i+2] == 'U' { + digits := bsl[i+3:] + r := hexDigitsToRune(digits, 8) + nc := encoderune(bsl[j:], r) + j += nc + i += 10 + continue + } + } + bsl[j] = b + j += 1 + } + return j +} + +// Demangle a function symbol. Applies the reverse of go_encode_id() +// as used in the compiler. + +func demangleSymbol(s string) string { + bsl := []byte(s) + nchars := decodeIdentifier(bsl) + bsl = bsl[:nchars] + return string(bsl) +} + // implemented in go-caller.c func funcfileline(uintptr, int32) (string, string, int) func funcentry(uintptr) uintptr diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go index a95d95b..ea61baa 100644 --- a/libgo/go/runtime/time.go +++ b/libgo/go/runtime/time.go @@ -441,7 +441,7 @@ func badTimer() { // Entry points for net, time to call nanotime. -//go:linkname poll_runtimeNano internal_poll.runtimeNano +//go:linkname poll_runtimeNano internal..z2fpoll.runtimeNano func poll_runtimeNano() int64 { return nanotime() } diff --git a/libgo/go/runtime/trace.go b/libgo/go/runtime/trace.go index 7aed9a9..530d5e4 100644 --- a/libgo/go/runtime/trace.go +++ b/libgo/go/runtime/trace.go @@ -1143,7 +1143,7 @@ func traceNextGC() { // To access runtime functions from runtime/trace. 
// See runtime/trace/annotation.go -//go:linkname trace_userTaskCreate runtime_trace.userTaskCreate +//go:linkname trace_userTaskCreate runtime..z2ftrace.userTaskCreate func trace_userTaskCreate(id, parentID uint64, taskType string) { if !trace.enabled { return @@ -1161,12 +1161,12 @@ func trace_userTaskCreate(id, parentID uint64, taskType string) { traceReleaseBuffer(pid) } -//go:linkname trace_userTaskEnd runtime_trace.userTaskEnd +//go:linkname trace_userTaskEnd runtime..z2ftrace.userTaskEnd func trace_userTaskEnd(id uint64) { traceEvent(traceEvUserTaskEnd, 2, id) } -//go:linkname trace_userRegion runtime_trace.userRegion +//go:linkname trace_userRegion runtime..z2ftrace.userRegion func trace_userRegion(id, mode uint64, name string) { if !trace.enabled { return @@ -1183,7 +1183,7 @@ func trace_userRegion(id, mode uint64, name string) { traceReleaseBuffer(pid) } -//go:linkname trace_userLog runtime_trace.userLog +//go:linkname trace_userLog runtime..z2ftrace.userLog func trace_userLog(id uint64, category, message string) { if !trace.enabled { return diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go index e97071e..7347cea 100644 --- a/libgo/go/runtime/traceback_gccgo.go +++ b/libgo/go/runtime/traceback_gccgo.go @@ -110,9 +110,14 @@ func showframe(name string, gp *g) bool { } // isExportedRuntime reports whether name is an exported runtime function. -// It is only for runtime functions, so ASCII A-Z is fine. +// It is only for runtime functions, so ASCII A-Z is fine. Here also check +// for mangled functions from runtime/<...>, which will be prefixed with +// "runtime..z2f". func isExportedRuntime(name string) bool { const n = len("runtime.") + if hasprefix(name, "runtime..z2f") { + return true + } return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z' } |