author | Ian Lance Taylor <iant@golang.org> | 2020-11-16 20:06:53 -0800
---|---|---
committer | Ian Lance Taylor <iant@golang.org> | 2020-11-20 12:44:35 -0800
commit | a01dda3c23b836754814fab1cab949a1bbc641e8 (patch) |
tree | 826310b88323c0f636baf89393557fde6a56fdeb /libgo/go/runtime |
parent | 90bf60c3c24c6c99ebbecf9d08a6d0d916d73721 (diff) |
compiler, libgo: change mangling scheme
Overhaul the mangling scheme to avoid ambiguities if the package path
contains a dot. Instead of using dot both to separate components and
to mangle characters, use dot only to separate components and use
underscore to mangle characters.
For golang/go#41862
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/271726
Diffstat (limited to 'libgo/go/runtime')
29 files changed, 174 insertions, 148 deletions
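
To make the scheme concrete before the diff: below is a minimal sketch of the new encoding, distilled from the replacements visible in the hunks rather than taken from the compiler itself. The final dot separates the mangled package path from the symbol name; within a component, '_' escapes itself as "__", '/' becomes "_1", '.' becomes "_0", and other bytes would use the "_x"/"_u"/"_U" hex escapes (see the decodeIdentifier table in the symtab.go hunk). mangleComponent is a hypothetical helper name, not compiler code.

package main

import "fmt"

// mangleComponent escapes one path or symbol component under the new
// underscore scheme sketched above (hex escapes omitted for brevity).
func mangleComponent(s string) string {
	var out []byte
	for _, b := range []byte(s) {
		switch b {
		case '_':
			out = append(out, '_', '_') // '_' escapes itself
		case '.':
			out = append(out, '_', '0') // '.' never appears inside a mangled component
		case '/':
			out = append(out, '_', '1')
		default:
			out = append(out, b) // letters and digits pass through
		}
	}
	return string(out)
}

func main() {
	// Old scheme: "runtime..z2fpprof.runtime_cyclesPerSecond", where dot served
	// as both separator and escape. The new scheme keeps '.' purely as separator:
	fmt.Println(mangleComponent("runtime/pprof") + "." + mangleComponent("runtime_cyclesPerSecond"))
	// Output: runtime_1pprof.runtime__cyclesPerSecond
}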
diff --git a/libgo/go/runtime/atomic_pointer.go b/libgo/go/runtime/atomic_pointer.go
index 49b0f2b..0295913 100644
--- a/libgo/go/runtime/atomic_pointer.go
+++ b/libgo/go/runtime/atomic_pointer.go
@@ -39,10 +39,10 @@ func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
 // We cannot just call the runtime routines, because the race detector expects
 // to be able to intercept the sync/atomic forms but not the runtime forms.
 
-//go:linkname sync_atomic_StoreUintptr sync..z2fatomic.StoreUintptr
+//go:linkname sync_atomic_StoreUintptr sync_1atomic.StoreUintptr
 func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr)
 
-//go:linkname sync_atomic_StorePointer sync..z2fatomic.StorePointer
+//go:linkname sync_atomic_StorePointer sync_1atomic.StorePointer
 //go:nosplit
 func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
 	if writeBarrier.enabled {
@@ -51,10 +51,10 @@ func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) {
 	sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new))
 }
 
-//go:linkname sync_atomic_SwapUintptr sync..z2fatomic.SwapUintptr
+//go:linkname sync_atomic_SwapUintptr sync_1atomic.SwapUintptr
 func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr
 
-//go:linkname sync_atomic_SwapPointer sync..z2fatomic.SwapPointer
+//go:linkname sync_atomic_SwapPointer sync_1atomic.SwapPointer
 //go:nosplit
 func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
 	if writeBarrier.enabled {
@@ -64,10 +64,10 @@ func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Poi
 	return old
 }
 
-//go:linkname sync_atomic_CompareAndSwapUintptr sync..z2fatomic.CompareAndSwapUintptr
+//go:linkname sync_atomic_CompareAndSwapUintptr sync_1atomic.CompareAndSwapUintptr
 func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool
 
-//go:linkname sync_atomic_CompareAndSwapPointer sync..z2fatomic.CompareAndSwapPointer
+//go:linkname sync_atomic_CompareAndSwapPointer sync_1atomic.CompareAndSwapPointer
 //go:nosplit
 func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
 	if writeBarrier.enabled {
diff --git a/libgo/go/runtime/chan.go b/libgo/go/runtime/chan.go
index b909d47..8e104f1 100644
--- a/libgo/go/runtime/chan.go
+++ b/libgo/go/runtime/chan.go
@@ -774,7 +774,7 @@ func reflect_chanlen(c *hchan) int {
 	return int(c.qcount)
 }
 
-//go:linkname reflectlite_chanlen internal..z2freflectlite.chanlen
+//go:linkname reflectlite_chanlen internal_1reflectlite.chanlen
 func reflectlite_chanlen(c *hchan) int {
 	if c == nil {
 		return 0
diff --git a/libgo/go/runtime/cpuprof.go b/libgo/go/runtime/cpuprof.go
index d395210..43f0a67 100644
--- a/libgo/go/runtime/cpuprof.go
+++ b/libgo/go/runtime/cpuprof.go
@@ -189,7 +189,7 @@ func CPUProfile() []byte {
 	panic("CPUProfile no longer available")
 }
 
-//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime..z2fpprof.runtime_cyclesPerSecond
+//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime_1pprof.runtime__cyclesPerSecond
 func runtime_pprof_runtime_cyclesPerSecond() int64 {
 	return tickspersecond()
 }
@@ -200,7 +200,7 @@ func runtime_pprof_runtime_cyclesPerSecond() int64 {
 // on has been returned, readProfile returns eof=true.
 // The caller must save the returned data and tags before calling readProfile again.
 //
-//go:linkname runtime_pprof_readProfile runtime..z2fpprof.readProfile
+//go:linkname runtime_pprof_readProfile runtime_1pprof.readProfile
 func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) {
 	lock(&cpuprof.lock)
 	log := cpuprof.log
diff --git a/libgo/go/runtime/debug.go b/libgo/go/runtime/debug.go
index 1202e36..ff76580 100644
--- a/libgo/go/runtime/debug.go
+++ b/libgo/go/runtime/debug.go
@@ -66,7 +66,7 @@ func NumGoroutine() int {
 // added.
 func Fieldtrack(map[string]bool)
 
-//go:linkname debug_modinfo runtime..z2fdebug.modinfo
+//go:linkname debug_modinfo runtime_1debug.modinfo
 func debug_modinfo() string {
 	return modinfo
 }
diff --git a/libgo/go/runtime/heapdump.go b/libgo/go/runtime/heapdump.go
index e8f16e9..816d93c 100644
--- a/libgo/go/runtime/heapdump.go
+++ b/libgo/go/runtime/heapdump.go
@@ -16,7 +16,7 @@ import (
 	"unsafe"
 )
 
-//go:linkname runtime_debug_WriteHeapDump runtime..z2fdebug.WriteHeapDump
+//go:linkname runtime_debug_WriteHeapDump runtime_1debug.WriteHeapDump
 func runtime_debug_WriteHeapDump(fd uintptr) {
 	stopTheWorld("write heap dump")
 
diff --git a/libgo/go/runtime/iface.go b/libgo/go/runtime/iface.go
index 5667ddb..f9df1e0 100644
--- a/libgo/go/runtime/iface.go
+++ b/libgo/go/runtime/iface.go
@@ -505,7 +505,7 @@ func reflect_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
 	dst.data = e.data
 }
 
-//go:linkname reflectlite_ifaceE2I internal..z2freflectlite.ifaceE2I
+//go:linkname reflectlite_ifaceE2I internal_1reflectlite.ifaceE2I
 func reflectlite_ifaceE2I(inter *interfacetype, e eface, dst *iface) {
 	t := e._type
 	if t == nil {
diff --git a/libgo/go/runtime/internal/atomic/atomic.c b/libgo/go/runtime/internal/atomic/atomic.c
index 8ae4d7b..9fed1a8 100644
--- a/libgo/go/runtime/internal/atomic/atomic.c
+++ b/libgo/go/runtime/internal/atomic/atomic.c
@@ -7,7 +7,7 @@
 #include "runtime.h"
 
 uint32_t Load (uint32_t *ptr)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Load")
   __attribute__ ((no_split_stack));
 
 uint32_t
@@ -17,7 +17,7 @@ Load (uint32_t *ptr)
 }
 
 void *Loadp (void *ptr)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loadp")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Loadp")
   __attribute__ ((no_split_stack));
 
 void *
@@ -27,7 +27,7 @@ Loadp (void *ptr)
 }
 
 uint8_t Load8 (uint8_t *ptr)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load8")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Load8")
   __attribute__ ((no_split_stack));
 
 uint8_t
@@ -37,7 +37,7 @@ Load8 (uint8_t *ptr)
 }
 
 uint64_t Load64 (uint64_t *ptr)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load64")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Load64")
   __attribute__ ((no_split_stack));
 
 uint64_t
@@ -49,7 +49,7 @@ Load64 (uint64_t *ptr)
 }
 
 uint32_t LoadAcq (uint32_t *ptr)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.LoadAcq")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.LoadAcq")
   __attribute__ ((no_split_stack));
 
 uint32_t
@@ -59,7 +59,7 @@ LoadAcq (uint32_t *ptr)
 }
 
 uintptr_t Loaduintptr (uintptr_t *ptr)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loaduintptr")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Loaduintptr")
   __attribute__ ((no_split_stack));
 
 uintptr_t
@@ -69,7 +69,7 @@ Loaduintptr (uintptr_t *ptr)
 }
 
 uintgo Loaduint (uintgo *ptr)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loaduint")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Loaduint")
   __attribute__ ((no_split_stack));
 
 uintgo
@@ -79,7 +79,7 @@ Loaduint (uintgo *ptr)
 }
 
 int64_t Loadint64 (int64_t *ptr)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loadint64")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Loadint64")
   __attribute__ ((no_split_stack));
 
 int64_t
@@ -91,7 +91,7 @@ Loadint64 (int64_t *ptr)
 }
 
 uint32_t Xadd (uint32_t *ptr, int32_t delta)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xadd")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Xadd")
   __attribute__ ((no_split_stack));
 
 uint32_t
@@ -101,7 +101,7 @@ Xadd (uint32_t *ptr, int32_t delta)
 }
 
 uint64_t Xadd64 (uint64_t *ptr, int64_t delta)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xadd64")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Xadd64")
   __attribute__ ((no_split_stack));
 
 uint64_t
@@ -113,7 +113,7 @@ Xadd64 (uint64_t *ptr, int64_t delta)
 }
 
 uintptr_t Xadduintptr (uintptr_t *ptr, uintptr_t delta)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xadduintptr")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Xadduintptr")
   __attribute__ ((no_split_stack));
 
 uintptr_t
@@ -123,7 +123,7 @@ Xadduintptr (uintptr_t *ptr, uintptr_t delta)
 }
 
 int64_t Xaddint64 (int64_t *ptr, int64_t delta)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xaddint64")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Xaddint64")
   __attribute__ ((no_split_stack));
 
 int64_t
@@ -135,7 +135,7 @@ Xaddint64 (int64_t *ptr, int64_t delta)
 }
 
 uint32_t Xchg (uint32_t *ptr, uint32_t new)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchg")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Xchg")
   __attribute__ ((no_split_stack));
 
 uint32_t
@@ -145,7 +145,7 @@ Xchg (uint32_t *ptr, uint32_t new)
 }
 
 uint64_t Xchg64 (uint64_t *ptr, uint64_t new)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchg64")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Xchg64")
   __attribute__ ((no_split_stack));
 
 uint64_t
@@ -157,7 +157,7 @@ Xchg64 (uint64_t *ptr, uint64_t new)
 }
 
 uintptr_t Xchguintptr (uintptr_t *ptr, uintptr_t new)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchguintptr")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Xchguintptr")
   __attribute__ ((no_split_stack));
 
 uintptr_t
@@ -167,7 +167,7 @@ Xchguintptr (uintptr_t *ptr, uintptr_t new)
 }
 
 void And8 (uint8_t *ptr, uint8_t val)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.And8")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.And8")
   __attribute__ ((no_split_stack));
 
 void
@@ -177,7 +177,7 @@ And8 (uint8_t *ptr, uint8_t val)
 }
 
 void Or8 (uint8_t *ptr, uint8_t val)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Or8")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Or8")
   __attribute__ ((no_split_stack));
 
 void
@@ -187,7 +187,7 @@ Or8 (uint8_t *ptr, uint8_t val)
 }
 
 _Bool Cas (uint32_t *ptr, uint32_t old, uint32_t new)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Cas")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Cas")
   __attribute__ ((no_split_stack));
 
 _Bool
@@ -197,7 +197,7 @@ Cas (uint32_t *ptr, uint32_t old, uint32_t new)
 }
 
 _Bool Cas64 (uint64_t *ptr, uint64_t old, uint64_t new)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Cas64")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Cas64")
   __attribute__ ((no_split_stack));
 
 _Bool
@@ -209,7 +209,7 @@ Cas64 (uint64_t *ptr, uint64_t old, uint64_t new)
 }
 
 _Bool CasRel (uint32_t *ptr, uint32_t old, uint32_t new)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.CasRel")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.CasRel")
   __attribute__ ((no_split_stack));
 
 _Bool
@@ -219,7 +219,7 @@ CasRel (uint32_t *ptr, uint32_t old, uint32_t new)
 }
 
 _Bool Casp1 (void **ptr, void *old, void *new)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Casp1")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Casp1")
   __attribute__ ((no_split_stack));
 
 _Bool
@@ -229,7 +229,7 @@ Casp1 (void **ptr, void *old, void *new)
 }
 
 _Bool Casuintptr (uintptr_t *ptr, uintptr_t old, uintptr_t new)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Casuintptr")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Casuintptr")
   __attribute__ ((no_split_stack));
 
 _Bool
@@ -239,7 +239,7 @@ Casuintptr (uintptr_t *ptr, uintptr_t old, uintptr_t new)
 }
 
 void Store (uint32_t *ptr, uint32_t val)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Store")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Store")
   __attribute__ ((no_split_stack));
 
 void
@@ -249,7 +249,7 @@ Store (uint32_t *ptr, uint32_t val)
 }
 
 void Store8 (uint8_t *ptr, uint8_t val)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Store8")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Store8")
   __attribute__ ((no_split_stack));
 
 void
@@ -259,7 +259,7 @@ Store8 (uint8_t *ptr, uint8_t val)
 }
 
 void Store64 (uint64_t *ptr, uint64_t val)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Store64")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Store64")
   __attribute__ ((no_split_stack));
 
 void
@@ -271,7 +271,7 @@ Store64 (uint64_t *ptr, uint64_t val)
 }
 
 void StoreRel (uint32_t *ptr, uint32_t val)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.StoreRel")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.StoreRel")
   __attribute__ ((no_split_stack));
 
 void
@@ -281,7 +281,7 @@ StoreRel (uint32_t *ptr, uint32_t val)
 }
 
 void Storeuintptr (uintptr_t *ptr, uintptr_t val)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Storeuintptr")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.Storeuintptr")
   __attribute__ ((no_split_stack));
 
 void
@@ -291,7 +291,7 @@ Storeuintptr (uintptr_t *ptr, uintptr_t val)
 }
 
 void StorepNoWB (void *ptr, void *val)
-  __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.StorepNoWB")
+  __asm__ (GOSYM_PREFIX "runtime_1internal_1atomic.StorepNoWB")
   __attribute__ ((no_split_stack));
 
 void
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
index 81351ee..feb043a 100644
--- a/libgo/go/runtime/malloc.go
+++ b/libgo/go/runtime/malloc.go
@@ -1220,12 +1220,12 @@ func newobject(typ *_type) unsafe.Pointer {
 	return mallocgc(typ.size, typ, true)
 }
 
-//go:linkname reflect_unsafe_New reflect.unsafe_New
+//go:linkname reflect_unsafe_New reflect.unsafe__New
 func reflect_unsafe_New(typ *_type) unsafe.Pointer {
 	return mallocgc(typ.size, typ, true)
 }
 
-//go:linkname reflectlite_unsafe_New internal..z2freflectlite.unsafe_New
+//go:linkname reflectlite_unsafe_New internal_1reflectlite.unsafe__New
 func reflectlite_unsafe_New(typ *_type) unsafe.Pointer {
 	return mallocgc(typ.size, typ, true)
 }
@@ -1242,7 +1242,7 @@ func newarray(typ *_type, n int) unsafe.Pointer {
 	return mallocgc(mem, typ, true)
 }
 
-//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
+//go:linkname reflect_unsafe_NewArray reflect.unsafe__NewArray
 func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
 	return newarray(typ, n)
 }
diff --git a/libgo/go/runtime/map.go b/libgo/go/runtime/map.go
index b829771..1155fee 100644
--- a/libgo/go/runtime/map.go
+++ b/libgo/go/runtime/map.go
@@ -1417,7 +1417,7 @@ func reflect_maplen(h *hmap) int {
 	return h.count
 }
 
-//go:linkname reflectlite_maplen internal..z2freflectlite.maplen
+//go:linkname reflectlite_maplen internal_1reflectlite.maplen
 func reflectlite_maplen(h *hmap) int {
 	if h == nil {
 		return 0
diff --git a/libgo/go/runtime/mbarrier.go b/libgo/go/runtime/mbarrier.go
index 836f85a..3bd8b34 100644
--- a/libgo/go/runtime/mbarrier.go
+++ b/libgo/go/runtime/mbarrier.go
@@ -192,7 +192,7 @@ func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 	typedmemmove(typ, dst, src)
 }
 
-//go:linkname reflectlite_typedmemmove internal..z2freflectlite.typedmemmove
+//go:linkname reflectlite_typedmemmove internal_1reflectlite.typedmemmove
 func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
 	reflect_typedmemmove(typ, dst, src)
 }
diff --git a/libgo/go/runtime/mgc.go b/libgo/go/runtime/mgc.go
index 9dd7bff..72479c2 100644
--- a/libgo/go/runtime/mgc.go
+++ b/libgo/go/runtime/mgc.go
@@ -223,7 +223,7 @@ func gcenable() {
 	memstats.enablegc = true // now that runtime is initialized, GC is okay
 }
 
-//go:linkname setGCPercent runtime..z2fdebug.setGCPercent
+//go:linkname setGCPercent runtime_1debug.setGCPercent
 func setGCPercent(in int32) (out int32) {
 	// Run on the system stack since we grab the heap lock.
 	systemstack(func() {
@@ -2238,7 +2238,7 @@ func gcResetMarkState() {
 
 var poolcleanup func()
 
-//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
+//go:linkname sync_runtime_registerPoolCleanup sync.runtime__registerPoolCleanup
 func sync_runtime_registerPoolCleanup(f func()) {
 	poolcleanup = f
 }
diff --git a/libgo/go/runtime/mheap.go b/libgo/go/runtime/mheap.go
index e73ee32..755efd1 100644
--- a/libgo/go/runtime/mheap.go
+++ b/libgo/go/runtime/mheap.go
@@ -1502,7 +1502,7 @@ func (h *mheap) scavengeAll() {
 	}
 }
 
-//go:linkname runtime_debug_freeOSMemory runtime..z2fdebug.freeOSMemory
+//go:linkname runtime_debug_freeOSMemory runtime_1debug.freeOSMemory
 func runtime_debug_freeOSMemory() {
 	GC()
 	systemstack(func() { mheap_.scavengeAll() })
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
index a4b135d..afacf8f 100644
--- a/libgo/go/runtime/mprof.go
+++ b/libgo/go/runtime/mprof.go
@@ -942,7 +942,7 @@ func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
 	return
 }
 
-//go:linkname runtime_goroutineProfileWithLabels runtime..z2fpprof.runtime_goroutineProfileWithLabels
+//go:linkname runtime_goroutineProfileWithLabels runtime_1pprof.runtime__goroutineProfileWithLabels
 func runtime_goroutineProfileWithLabels(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
 	return goroutineProfileWithLabels(p, labels)
 }
diff --git a/libgo/go/runtime/mstats.go b/libgo/go/runtime/mstats.go
index 4e2c66ce..85a0861 100644
--- a/libgo/go/runtime/mstats.go
+++ b/libgo/go/runtime/mstats.go
@@ -468,7 +468,7 @@ func readmemstats_m(stats *MemStats) {
 	stats.StackSys += stats.StackInuse
 }
 
-//go:linkname readGCStats runtime..z2fdebug.readGCStats
+//go:linkname readGCStats runtime_1debug.readGCStats
 func readGCStats(pauses *[]uint64) {
 	systemstack(func() {
 		readGCStats_m(pauses)
diff --git a/libgo/go/runtime/net_plan9.go b/libgo/go/runtime/net_plan9.go
index 907c319..38ff5a4 100644
--- a/libgo/go/runtime/net_plan9.go
+++ b/libgo/go/runtime/net_plan9.go
@@ -8,12 +8,12 @@ import (
 	_ "unsafe"
 )
 
-//go:linkname runtime_ignoreHangup internal..z2fpoll.runtime_ignoreHangup
+//go:linkname runtime_ignoreHangup internal_1poll.runtime__ignoreHangup
 func runtime_ignoreHangup() {
 	getg().m.ignoreHangup = true
 }
 
-//go:linkname runtime_unignoreHangup internal..z2fpoll.runtime_unignoreHangup
+//go:linkname runtime_unignoreHangup internal_1poll.runtime__unignoreHangup
 func runtime_unignoreHangup(sig string) {
 	getg().m.ignoreHangup = false
 }
diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go
index 72a136d..da00b57 100644
--- a/libgo/go/runtime/netpoll.go
+++ b/libgo/go/runtime/netpoll.go
@@ -113,7 +113,7 @@ var (
 	netpollWaiters uint32
 )
 
-//go:linkname poll_runtime_pollServerInit internal..z2fpoll.runtime_pollServerInit
+//go:linkname poll_runtime_pollServerInit internal_1poll.runtime__pollServerInit
 func poll_runtime_pollServerInit() {
 	netpollGenericInit()
 }
@@ -134,7 +134,7 @@ func netpollinited() bool {
 	return atomic.Load(&netpollInited) != 0
 }
 
-//go:linkname poll_runtime_isPollServerDescriptor internal..z2fpoll.runtime_isPollServerDescriptor
+//go:linkname poll_runtime_isPollServerDescriptor internal_1poll.runtime__isPollServerDescriptor
 
 // poll_runtime_isPollServerDescriptor reports whether fd is a
 // descriptor being used by netpoll.
@@ -142,7 +142,7 @@ func poll_runtime_isPollServerDescriptor(fd uintptr) bool {
 	return netpollIsPollDescriptor(fd)
 }
 
-//go:linkname poll_runtime_pollOpen internal..z2fpoll.runtime_pollOpen
+//go:linkname poll_runtime_pollOpen internal_1poll.runtime__pollOpen
 func poll_runtime_pollOpen(fd uintptr) (uintptr, int) {
 	pd := pollcache.alloc()
 	lock(&pd.lock)
@@ -169,7 +169,7 @@ func poll_runtime_pollOpen(fd uintptr) (uintptr, int) {
 	return uintptr(unsafe.Pointer(pd)), int(errno)
 }
 
-//go:linkname poll_runtime_pollClose internal..z2fpoll.runtime_pollClose
+//go:linkname poll_runtime_pollClose internal_1poll.runtime__pollClose
 func poll_runtime_pollClose(ctx uintptr) {
 	pd := (*pollDesc)(unsafe.Pointer(ctx))
 	if !pd.closing {
@@ -195,7 +195,7 @@ func (c *pollCache) free(pd *pollDesc) {
 // poll_runtime_pollReset, which is internal/poll.runtime_pollReset,
 // prepares a descriptor for polling in mode, which is 'r' or 'w'.
 // This returns an error code; the codes are defined above.
-//go:linkname poll_runtime_pollReset internal..z2fpoll.runtime_pollReset
+//go:linkname poll_runtime_pollReset internal_1poll.runtime__pollReset
 func poll_runtime_pollReset(ctx uintptr, mode int) int {
 	pd := (*pollDesc)(unsafe.Pointer(ctx))
 	errcode := netpollcheckerr(pd, int32(mode))
@@ -214,7 +214,7 @@ func poll_runtime_pollReset(ctx uintptr, mode int) int {
 // waits for a descriptor to be ready for reading or writing,
 // according to mode, which is 'r' or 'w'.
 // This returns an error code; the codes are defined above.
-//go:linkname poll_runtime_pollWait internal..z2fpoll.runtime_pollWait
+//go:linkname poll_runtime_pollWait internal_1poll.runtime__pollWait
 func poll_runtime_pollWait(ctx uintptr, mode int) int {
 	pd := (*pollDesc)(unsafe.Pointer(ctx))
 	errcode := netpollcheckerr(pd, int32(mode))
@@ -237,7 +237,7 @@ func poll_runtime_pollWait(ctx uintptr, mode int) int {
 	return pollNoError
 }
 
-//go:linkname poll_runtime_pollWaitCanceled internal..z2fpoll.runtime_pollWaitCanceled
+//go:linkname poll_runtime_pollWaitCanceled internal_1poll.runtime__pollWaitCanceled
 func poll_runtime_pollWaitCanceled(ctx uintptr, mode int) {
 	pd := (*pollDesc)(unsafe.Pointer(ctx))
 	// This function is used only on windows after a failed attempt to cancel
@@ -246,7 +246,7 @@ func poll_runtime_pollWaitCanceled(ctx uintptr, mode int) {
 	}
 }
 
-//go:linkname poll_runtime_pollSetDeadline internal..z2fpoll.runtime_pollSetDeadline
+//go:linkname poll_runtime_pollSetDeadline internal_1poll.runtime__pollSetDeadline
 func poll_runtime_pollSetDeadline(ctx uintptr, d int64, mode int) {
 	pd := (*pollDesc)(unsafe.Pointer(ctx))
 	lock(&pd.lock)
@@ -330,7 +330,7 @@ func poll_runtime_pollSetDeadline(ctx uintptr, d int64, mode int) {
 	}
 }
 
-//go:linkname poll_runtime_pollUnblock internal..z2fpoll.runtime_pollUnblock
+//go:linkname poll_runtime_pollUnblock internal_1poll.runtime__pollUnblock
 func poll_runtime_pollUnblock(ctx uintptr) {
 	pd := (*pollDesc)(unsafe.Pointer(ctx))
 	lock(&pd.lock)
diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go
index 625ab7de..83bf572 100644
--- a/libgo/go/runtime/pprof/mprof_test.go
+++ b/libgo/go/runtime/pprof/mprof_test.go
@@ -91,35 +91,35 @@ func TestMemoryProfiler(t *testing.T) {
 		stk    []string
 		legacy string
 	}{{
-		stk: []string{"pprof.allocatePersistent1K", "runtime/pprof.TestMemoryProfiler"},
+		stk: []string{"runtime/pprof.allocatePersistent1K", "runtime/pprof.TestMemoryProfiler"},
 		legacy: fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f x]+
-#	0x[0-9,a-f]+	pprof\.allocatePersistent1K\+0x[0-9,a-f]+	.*/mprof_test\.go:47
+#	0x[0-9,a-f]+	runtime/pprof\.allocatePersistent1K\+0x[0-9,a-f]+	.*/mprof_test\.go:47
 #	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test\.go:82
 `, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun),
 	}, {
-		stk: []string{"pprof.allocateTransient1M", "runtime/pprof.TestMemoryProfiler"},
+		stk: []string{"runtime/pprof.allocateTransient1M", "runtime/pprof.TestMemoryProfiler"},
 		legacy: fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+
-#	0x[0-9,a-f]+	pprof\.allocateTransient1M\+0x[0-9,a-f]+	.*/mprof_test.go:24
+#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient1M\+0x[0-9,a-f]+	.*/mprof_test.go:24
 #	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:79
 `, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun),
 	}, {
-		stk: []string{"pprof.allocateTransient2M", "runtime/pprof.TestMemoryProfiler"},
+		stk: []string{"runtime/pprof.allocateTransient2M", "runtime/pprof.TestMemoryProfiler"},
 		// This should start with "0: 0" but gccgo's imprecise
 		// GC means that sometimes the value is not collected.
 		legacy: fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+
-#	0x[0-9,a-f]+	pprof\.allocateTransient2M\+0x[0-9,a-f]+	.*/mprof_test.go:30
+#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient2M\+0x[0-9,a-f]+	.*/mprof_test.go:30
 #	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:80
 `, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun),
 	}, {
-		stk: []string{"pprof.allocateTransient2MInline", "runtime/pprof.TestMemoryProfiler"},
+		stk: []string{"runtime/pprof.allocateTransient2MInline", "runtime/pprof.TestMemoryProfiler"},
 		legacy: fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+
-#	0x[0-9,a-f]+	pprof\.allocateTransient2MInline\+0x[0-9,a-f]+	.*/mprof_test.go:34
+#	0x[0-9,a-f]+	runtime/pprof\.allocateTransient2MInline\+0x[0-9,a-f]+	.*/mprof_test.go:34
 #	0x[0-9,a-f]+	runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+	.*/mprof_test.go:81
 `, memoryProfilerRun, (4<<20)*memoryProfilerRun, memoryProfilerRun, (4<<20)*memoryProfilerRun),
 	}, {
-		stk: []string{"pprof.allocateReflectTransient"},
+		stk: []string{"runtime/pprof.allocateReflectTransient"},
 		legacy: fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @( 0x[0-9,a-f]+)+
-#	0x[0-9,a-f]+	pprof\.allocateReflectTransient\+0x[0-9,a-f]+	.*/mprof_test.go:55
+#	0x[0-9,a-f]+	runtime/pprof\.allocateReflectTransient\+0x[0-9,a-f]+	.*/mprof_test.go:55
 `, memoryProfilerRun, (3<<20)*memoryProfilerRun, memoryProfilerRun, (3<<20)*memoryProfilerRun),
 	}}
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index ff86bce..7adf891 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -958,8 +958,8 @@ func TestMutexProfile(t *testing.T) {
 
 		stks := stacks(p)
 		for _, want := range [][]string{
-			// {"sync.(*Mutex).Unlock", "pprof.blockMutex.func1"},
-			{"sync.Mutex.Unlock", "pprof.blockMutex..func1"},
+			// {"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
+			{"sync.Mutex.Unlock", "runtime/pprof.blockMutex..func1"},
 		} {
 			if !containsStack(stks, want) {
 				t.Errorf("No matching stack entry for %+v", want)
diff --git a/libgo/go/runtime/preempt.go b/libgo/go/runtime/preempt.go
index 9a78bcf..8452076 100644
--- a/libgo/go/runtime/preempt.go
+++ b/libgo/go/runtime/preempt.go
@@ -360,7 +360,7 @@ func isAsyncSafePoint(gp *g, pc uintptr) (bool, uintptr) {
 	}
 	name := f.Name()
 	if hasPrefix(name, "runtime.") ||
-		hasPrefix(name, "runtime..z2finternal..z2f") ||
+		hasPrefix(name, "runtime_1internal_1") ||
 		hasPrefix(name, "reflect.") {
 		// For now we never async preempt the runtime or
 		// anything closely tied to the runtime. Known issues
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 0ca6c02..6c72050 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -263,7 +263,7 @@ func main(unsafe.Pointer) {
 }
 
 // os_beforeExit is called from os.Exit(0).
-//go:linkname os_beforeExit os.runtime_beforeExit
+//go:linkname os_beforeExit os.runtime__beforeExit
 func os_beforeExit() {
 	if raceenabled {
 		racefini()
@@ -3305,7 +3305,7 @@ func beforefork() {
 }
 
 // Called from syscall package before fork.
-//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
+//go:linkname syscall_runtime_BeforeFork syscall.runtime__BeforeFork
 //go:nosplit
 func syscall_runtime_BeforeFork() {
 	systemstack(beforefork)
@@ -3320,7 +3320,7 @@ func afterfork() {
 }
 
 // Called from syscall package after fork in parent.
-//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
+//go:linkname syscall_runtime_AfterFork syscall.runtime__AfterFork
 //go:nosplit
 func syscall_runtime_AfterFork() {
 	systemstack(afterfork)
@@ -3338,7 +3338,7 @@ var inForkedChild bool
 // temporarily sharing address space with the parent process, this must
 // not change any global variables or calling into C code that may do so.
 //
-//go:linkname syscall_runtime_AfterForkInChild syscall.runtime_AfterForkInChild
+//go:linkname syscall_runtime_AfterForkInChild syscall.runtime__AfterForkInChild
 //go:nosplit
 //go:nowritebarrierrec
 func syscall_runtime_AfterForkInChild() {
@@ -3363,7 +3363,7 @@ func syscall_runtime_AfterForkInChild() {
 var pendingPreemptSignals uint32
 
 // Called from syscall package before Exec.
-//go:linkname syscall_runtime_BeforeExec syscall.runtime_BeforeExec
+//go:linkname syscall_runtime_BeforeExec syscall.runtime__BeforeExec
 func syscall_runtime_BeforeExec() {
 	// Prevent thread creation during exec.
 	execLock.lock()
@@ -3378,7 +3378,7 @@ func syscall_runtime_BeforeExec() {
 }
 
 // Called from syscall package after Exec.
-//go:linkname syscall_runtime_AfterExec syscall.runtime_AfterExec
+//go:linkname syscall_runtime_AfterExec syscall.runtime__AfterExec
 func syscall_runtime_AfterExec() {
 	execLock.unlock()
 }
@@ -5165,7 +5165,7 @@ func (l *gList) pop() *g {
 	return gp
 }
 
-//go:linkname setMaxThreads runtime..z2fdebug.setMaxThreads
+//go:linkname setMaxThreads runtime_1debug.setMaxThreads
 func setMaxThreads(in int) (out int) {
 	lock(&sched.lock)
 	out = int(sched.maxmcount)
@@ -5199,32 +5199,32 @@ func procUnpin() {
 	_g_.m.locks--
 }
 
-//go:linkname sync_runtime_procPin sync.runtime_procPin
+//go:linkname sync_runtime_procPin sync.runtime__procPin
 //go:nosplit
 func sync_runtime_procPin() int {
 	return procPin()
 }
 
-//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
+//go:linkname sync_runtime_procUnpin sync.runtime__procUnpin
 //go:nosplit
 func sync_runtime_procUnpin() {
 	procUnpin()
 }
 
-//go:linkname sync_atomic_runtime_procPin sync..z2fatomic.runtime_procPin
+//go:linkname sync_atomic_runtime_procPin sync_1atomic.runtime__procPin
 //go:nosplit
 func sync_atomic_runtime_procPin() int {
 	return procPin()
 }
 
-//go:linkname sync_atomic_runtime_procUnpin sync..z2fatomic.runtime_procUnpin
+//go:linkname sync_atomic_runtime_procUnpin sync_1atomic.runtime__procUnpin
 //go:nosplit
 func sync_atomic_runtime_procUnpin() {
 	procUnpin()
 }
 
 // Active spinning for sync.Mutex.
-//go:linkname sync_runtime_canSpin sync.runtime_canSpin
+//go:linkname sync_runtime_canSpin sync.runtime__canSpin
 //go:nosplit
 func sync_runtime_canSpin(i int) bool {
 	// sync.Mutex is cooperative, so we are conservative with spinning.
@@ -5241,7 +5241,7 @@ func sync_runtime_canSpin(i int) bool {
 	return true
 }
 
-//go:linkname sync_runtime_doSpin sync.runtime_doSpin
+//go:linkname sync_runtime_doSpin sync.runtime__doSpin
 //go:nosplit
 func sync_runtime_doSpin() {
 	procyield(active_spin_cnt)
diff --git a/libgo/go/runtime/proflabel.go b/libgo/go/runtime/proflabel.go
index fc655cc..1e1f3bf 100644
--- a/libgo/go/runtime/proflabel.go
+++ b/libgo/go/runtime/proflabel.go
@@ -8,7 +8,7 @@ import "unsafe"
 
 var labelSync uintptr
 
-//go:linkname runtime_setProfLabel runtime..z2fpprof.runtime_setProfLabel
+//go:linkname runtime_setProfLabel runtime_1pprof.runtime__setProfLabel
 func runtime_setProfLabel(labels unsafe.Pointer) {
 	// Introduce race edge for read-back via profile.
 	// This would more properly use &getg().labels as the sync address,
@@ -34,7 +34,7 @@ func runtime_setProfLabel(labels unsafe.Pointer) {
 	getg().labels = labels
 }
 
-//go:linkname runtime_getProfLabel runtime..z2fpprof.runtime_getProfLabel
+//go:linkname runtime_getProfLabel runtime_1pprof.runtime__getProfLabel
 func runtime_getProfLabel() unsafe.Pointer {
 	return getg().labels
 }
diff --git a/libgo/go/runtime/rdebug.go b/libgo/go/runtime/rdebug.go
index 358df11..9c43ce5 100644
--- a/libgo/go/runtime/rdebug.go
+++ b/libgo/go/runtime/rdebug.go
@@ -11,14 +11,14 @@ import _ "unsafe" // for go:linkname
 // maxstacksize.
 var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
 
-//go:linkname setMaxStack runtime..z2fdebug.setMaxStack
+//go:linkname setMaxStack runtime_1debug.setMaxStack
 func setMaxStack(in int) (out int) {
 	out = int(maxstacksize)
 	maxstacksize = uintptr(in)
 	return out
 }
 
-//go:linkname setPanicOnFault runtime..z2fdebug.setPanicOnFault
+//go:linkname setPanicOnFault runtime_1debug.setPanicOnFault
 func setPanicOnFault(new bool) (old bool) {
 	_g_ := getg()
 	old = _g_.paniconfault
diff --git a/libgo/go/runtime/runtime.go b/libgo/go/runtime/runtime.go
index abc5eab..5af28ae 100644
--- a/libgo/go/runtime/runtime.go
+++ b/libgo/go/runtime/runtime.go
@@ -51,13 +51,13 @@ func tickspersecond() int64 {
 var envs []string
 var argslice []string
 
-//go:linkname syscall_runtime_envs syscall.runtime_envs
+//go:linkname syscall_runtime_envs syscall.runtime__envs
 func syscall_runtime_envs() []string { return append([]string{}, envs...) }
 
 //go:linkname syscall_Getpagesize syscall.Getpagesize
 func syscall_Getpagesize() int { return int(physPageSize) }
 
-//go:linkname os_runtime_args os.runtime_args
+//go:linkname os_runtime_args os.runtime__args
 func os_runtime_args() []string { return append([]string{}, argslice...) }
 
 //go:linkname syscall_Exit syscall.Exit
diff --git a/libgo/go/runtime/runtime1.go b/libgo/go/runtime/runtime1.go
index a8a53d3..39969d1 100644
--- a/libgo/go/runtime/runtime1.go
+++ b/libgo/go/runtime/runtime1.go
@@ -398,7 +398,7 @@ func parsedebugvars() {
 	traceback_env = traceback_cache
 }
 
-//go:linkname setTraceback runtime..z2fdebug.SetTraceback
+//go:linkname setTraceback runtime_1debug.SetTraceback
 func setTraceback(level string) {
 	var t uint32
 	switch level {
diff --git a/libgo/go/runtime/sema.go b/libgo/go/runtime/sema.go
index c1418b3..9a28880 100644
--- a/libgo/go/runtime/sema.go
+++ b/libgo/go/runtime/sema.go
@@ -51,27 +51,27 @@ var semtable [semTabSize]struct {
 	pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
 }
 
-//go:linkname sync_runtime_Semacquire sync.runtime_Semacquire
+//go:linkname sync_runtime_Semacquire sync.runtime__Semacquire
 func sync_runtime_Semacquire(addr *uint32) {
 	semacquire1(addr, false, semaBlockProfile, 0)
 }
 
-//go:linkname poll_runtime_Semacquire internal..z2fpoll.runtime_Semacquire
+//go:linkname poll_runtime_Semacquire internal_1poll.runtime__Semacquire
 func poll_runtime_Semacquire(addr *uint32) {
 	semacquire1(addr, false, semaBlockProfile, 0)
 }
 
-//go:linkname sync_runtime_Semrelease sync.runtime_Semrelease
+//go:linkname sync_runtime_Semrelease sync.runtime__Semrelease
 func sync_runtime_Semrelease(addr *uint32, handoff bool, skipframes int) {
 	semrelease1(addr, handoff, skipframes)
 }
 
-//go:linkname sync_runtime_SemacquireMutex sync.runtime_SemacquireMutex
+//go:linkname sync_runtime_SemacquireMutex sync.runtime__SemacquireMutex
 func sync_runtime_SemacquireMutex(addr *uint32, lifo bool, skipframes int) {
 	semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile, skipframes)
 }
 
-//go:linkname poll_runtime_Semrelease internal..z2fpoll.runtime_Semrelease
+//go:linkname poll_runtime_Semrelease internal_1poll.runtime__Semrelease
 func poll_runtime_Semrelease(addr *uint32) {
 	semrelease(addr)
 }
@@ -475,7 +475,7 @@ func less(a, b uint32) bool {
 // notifyListAdd adds the caller to a notify list such that it can receive
 // notifications. The caller must eventually call notifyListWait to wait for
 // such a notification, passing the returned ticket number.
-//go:linkname notifyListAdd sync.runtime_notifyListAdd
+//go:linkname notifyListAdd sync.runtime__notifyListAdd
 func notifyListAdd(l *notifyList) uint32 {
 	// This may be called concurrently, for example, when called from
 	// sync.Cond.Wait while holding a RWMutex in read mode.
@@ -484,7 +484,7 @@ func notifyListAdd(l *notifyList) uint32 {
 
 // notifyListWait waits for a notification. If one has been sent since
 // notifyListAdd was called, it returns immediately. Otherwise, it blocks.
-//go:linkname notifyListWait sync.runtime_notifyListWait
+//go:linkname notifyListWait sync.runtime__notifyListWait
 func notifyListWait(l *notifyList, t uint32) {
 	lockWithRank(&l.lock, lockRankNotifyList)
 
@@ -518,7 +518,7 @@ func notifyListWait(l *notifyList, t uint32) {
 }
 
 // notifyListNotifyAll notifies all entries in the list.
-//go:linkname notifyListNotifyAll sync.runtime_notifyListNotifyAll
+//go:linkname notifyListNotifyAll sync.runtime__notifyListNotifyAll
 func notifyListNotifyAll(l *notifyList) {
 	// Fast-path: if there are no new waiters since the last notification
 	// we don't need to acquire the lock.
@@ -550,7 +550,7 @@ func notifyListNotifyAll(l *notifyList) {
 }
 
 // notifyListNotifyOne notifies one entry in the list.
-//go:linkname notifyListNotifyOne sync.runtime_notifyListNotifyOne
+//go:linkname notifyListNotifyOne sync.runtime__notifyListNotifyOne
 func notifyListNotifyOne(l *notifyList) {
 	// Fast-path: if there are no new waiters since the last notification
 	// we don't need to acquire the lock at all.
@@ -603,7 +603,7 @@ func notifyListNotifyOne(l *notifyList) {
 	unlock(&l.lock)
 }
 
-//go:linkname notifyListCheck sync.runtime_notifyListCheck
+//go:linkname notifyListCheck sync.runtime__notifyListCheck
 func notifyListCheck(sz uintptr) {
 	if sz != unsafe.Sizeof(notifyList{}) {
 		print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
@@ -611,7 +611,7 @@ func notifyListCheck(sz uintptr) {
 	}
 }
 
-//go:linkname sync_nanotime sync.runtime_nanotime
+//go:linkname sync_nanotime sync.runtime__nanotime
 func sync_nanotime() int64 {
 	return nanotime()
 }
diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go
index 7d1028e..ed024e1 100644
--- a/libgo/go/runtime/sigqueue.go
+++ b/libgo/go/runtime/sigqueue.go
@@ -121,7 +121,7 @@ Send:
 
 // Called to receive the next queued signal.
 // Must only be called from a single goroutine at a time.
-//go:linkname signal_recv os..z2fsignal.signal_recv
+//go:linkname signal_recv os_1signal.signal__recv
 func signal_recv() uint32 {
 	for {
 		// Serve any signals from local copy.
@@ -169,7 +169,7 @@ func signal_recv() uint32 {
 // the signal(s) in question, and here we are just waiting to make sure
 // that all the signals have been delivered to the user channels
 // by the os/signal package.
-//go:linkname signalWaitUntilIdle os..z2fsignal.signalWaitUntilIdle
+//go:linkname signalWaitUntilIdle os_1signal.signalWaitUntilIdle
 func signalWaitUntilIdle() {
 	// Although the signals we care about have been removed from
 	// sig.wanted, it is possible that another thread has received
@@ -189,7 +189,7 @@ func signalWaitUntilIdle() {
 }
 
 // Must only be called from a single goroutine at a time.
-//go:linkname signal_enable os..z2fsignal.signal_enable
+//go:linkname signal_enable os_1signal.signal__enable
 func signal_enable(s uint32) {
 	if !sig.inuse {
 		// This is the first call to signal_enable. Initialize.
@@ -217,7 +217,7 @@ func signal_enable(s uint32) {
 }
 
 // Must only be called from a single goroutine at a time.
-//go:linkname signal_disable os..z2fsignal.signal_disable
+//go:linkname signal_disable os_1signal.signal__disable
 func signal_disable(s uint32) {
 	if s >= uint32(len(sig.wanted)*32) {
 		return
@@ -230,7 +230,7 @@ func signal_disable(s uint32) {
 }
 
 // Must only be called from a single goroutine at a time.
-//go:linkname signal_ignore os..z2fsignal.signal_ignore
+//go:linkname signal_ignore os_1signal.signal__ignore
 func signal_ignore(s uint32) {
 	if s >= uint32(len(sig.wanted)*32) {
 		return
@@ -257,7 +257,7 @@ func sigInitIgnored(s uint32) {
 }
 
 // Checked by signal handlers.
-//go:linkname signal_ignored os..z2fsignal.signal_ignored
+//go:linkname signal_ignored os_1signal.signal__ignored
 func signal_ignored(s uint32) bool {
 	i := atomic.Load(&sig.ignored[s/32])
 	return i&(1<<(s&31)) != 0
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go
index bb0b61d..22a2b13 100644
--- a/libgo/go/runtime/symtab.go
+++ b/libgo/go/runtime/symtab.go
@@ -5,6 +5,7 @@
 package runtime
 
 import (
+	"internal/bytealg"
 	_ "unsafe" // for go:linkname
 )
 
@@ -119,7 +120,7 @@ func pcInlineCallers(pc uintptr, locbuf *location, max int32) int32
 
 // runtime_expandFinalInlineFrame expands the final pc in stk to include all
 // "callers" if pc is inline.
 //
-//go:linkname runtime_expandFinalInlineFrame runtime..z2fpprof.runtime_expandFinalInlineFrame
+//go:linkname runtime_expandFinalInlineFrame runtime_1pprof.runtime__expandFinalInlineFrame
 func runtime_expandFinalInlineFrame(stk []uintptr) []uintptr {
 	if len(stk) == 0 {
 		return stk
@@ -210,42 +211,62 @@ func hexDigitsToRune(digits []byte, ndig int) rune {
 	return rune(result)
 }
 
-// Perform an in-place decoding on the input byte slice. This looks
-// for "..z<hex x 2>", "..u<hex x 4>" and "..U<hex x 8>" and overwrites
-// with the encoded bytes corresponding to the unicode in question.
-// Return value is the number of bytes taken by the result.
-
+// decodeIdentifier performs an in-place decoding on the input byte slice.
+// This undoes the compiler underscore mangling.
+// Returns the number of bytes used by the result.
 func decodeIdentifier(bsl []byte) int {
+	underscoreCodes := map[byte]byte{
+		'_': '_',
+		'0': '.',
+		'1': '/',
+		'2': '*',
+		'3': ',',
+		'4': '{',
+		'5': '}',
+		'6': '[',
+		'7': ']',
+		'8': '(',
+		'9': ')',
+		'a': '"',
+		'b': ' ',
+		'c': ';',
+	}
+
 	j := 0
 	for i := 0; i < len(bsl); i++ {
 		b := bsl[i]
+		if b != '_' || i+1 >= len(bsl) {
+			bsl[j] = b
+			j++
+			continue
+		}
+
+		if d, ok := underscoreCodes[bsl[i+1]]; ok {
+			i++
+			bsl[j] = d
+			j++
+			continue
+		}
+
+		rlen := 0
+		switch bsl[i+1] {
+		case 'x':
+			rlen = 2
+		case 'u':
+			rlen = 4
+		case 'U':
+			rlen = 8
+		}
 
-		if i+1 < len(bsl) && bsl[i] == '.' && bsl[i+1] == '.' {
-			if i+4 < len(bsl) && bsl[i+2] == 'z' {
-				digits := bsl[i+3:]
-				r := hexDigitsToRune(digits, 2)
-				nc := encoderune(bsl[j:], r)
-				j += nc
-				i += 4
-				continue
-			} else if i+6 < len(bsl) && bsl[i+2] == 'u' {
-				digits := bsl[i+3:]
-				r := hexDigitsToRune(digits, 4)
-				nc := encoderune(bsl[j:], r)
-				j += nc
-				i += 6
-				continue
-			} else if i+10 < len(bsl) && bsl[i+2] == 'U' {
-				digits := bsl[i+3:]
-				r := hexDigitsToRune(digits, 8)
-				nc := encoderune(bsl[j:], r)
-				j += nc
-				i += 10
-				continue
-			}
+		if rlen > 0 && i+1+rlen < len(bsl) {
+			r := hexDigitsToRune(bsl[i+2:], rlen)
+			nc := encoderune(bsl[j:], r)
+			j += nc
+			i += rlen + 1
+		} else {
+			bsl[j] = b
+			j++
 		}
-
-		bsl[j] = b
-		j += 1
 	}
 
 	return j
 }
@@ -254,6 +275,11 @@
 // as used in the compiler.
 func demangleSymbol(s string) string {
+	if bytealg.IndexByteString(s, '.') < 0 {
+		// A symbol with no '.' is not a Go symbol.
+		return s
+	}
+
 	bsl := []byte(s)
 	nchars := decodeIdentifier(bsl)
 	bsl = bsl[:nchars]
diff --git a/libgo/go/runtime/trace.go b/libgo/go/runtime/trace.go
index ce185fc..b05f30a 100644
--- a/libgo/go/runtime/trace.go
+++ b/libgo/go/runtime/trace.go
@@ -1152,7 +1152,7 @@ func traceNextGC() {
 
 // To access runtime functions from runtime/trace.
 // See runtime/trace/annotation.go
-//go:linkname trace_userTaskCreate runtime..z2ftrace.userTaskCreate
+//go:linkname trace_userTaskCreate runtime_1trace.userTaskCreate
 func trace_userTaskCreate(id, parentID uint64, taskType string) {
 	if !trace.enabled {
 		return
@@ -1170,12 +1170,12 @@ func trace_userTaskCreate(id, parentID uint64, taskType string) {
 	traceReleaseBuffer(pid)
 }
 
-//go:linkname trace_userTaskEnd runtime..z2ftrace.userTaskEnd
+//go:linkname trace_userTaskEnd runtime_1trace.userTaskEnd
 func trace_userTaskEnd(id uint64) {
 	traceEvent(traceEvUserTaskEnd, 2, id)
 }
 
-//go:linkname trace_userRegion runtime..z2ftrace.userRegion
+//go:linkname trace_userRegion runtime_1trace.userRegion
 func trace_userRegion(id, mode uint64, name string) {
 	if !trace.enabled {
 		return
@@ -1192,7 +1192,7 @@ func trace_userRegion(id, mode uint64, name string) {
 	traceReleaseBuffer(pid)
 }
 
-//go:linkname trace_userLog runtime..z2ftrace.userLog
+//go:linkname trace_userLog runtime_1trace.userLog
 func trace_userLog(id uint64, category, message string) {
 	if !trace.enabled {
 		return
diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go
index 1ba91af..ebdbefc 100644
--- a/libgo/go/runtime/traceback_gccgo.go
+++ b/libgo/go/runtime/traceback_gccgo.go
@@ -184,10 +184,10 @@ func showfuncinfo(name string, firstFrame bool) bool {
 // isExportedRuntime reports whether name is an exported runtime function.
 // It is only for runtime functions, so ASCII A-Z is fine. Here also check
 // for mangled functions from runtime/<...>, which will be prefixed with
-// "runtime..z2f".
+// "runtime_1".
 func isExportedRuntime(name string) bool {
 	const n = len("runtime.")
-	if hasPrefix(name, "runtime..z2f") {
+	if hasPrefix(name, "runtime_1") {
 		return true
 	}
 	return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
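
As a closing illustration, here is a standalone sketch of the decoding direction, mirroring the underscoreCodes table introduced in the symtab.go hunk above. The "_x"/"_u"/"_U" hex escapes handled by hexDigitsToRune/encoderune are omitted for brevity, and demangle is an illustrative name, not the runtime's API.

package main

import "fmt"

// underscoreCodes copies the single-byte escape table from the new
// decodeIdentifier above: '_' followed by a code byte decodes to one
// plain byte.
var underscoreCodes = map[byte]byte{
	'_': '_', '0': '.', '1': '/', '2': '*', '3': ',', '4': '{', '5': '}',
	'6': '[', '7': ']', '8': '(', '9': ')', 'a': '"', 'b': ' ', 'c': ';',
}

// demangle rewrites s in place, replacing each underscore escape with
// the byte it stands for, and returns the decoded name.
func demangle(s string) string {
	b := []byte(s)
	j := 0
	for i := 0; i < len(b); i++ {
		if b[i] == '_' && i+1 < len(b) {
			if d, ok := underscoreCodes[b[i+1]]; ok {
				b[j] = d
				j++
				i++ // consume the code byte as well
				continue
			}
		}
		b[j] = b[i]
		j++
	}
	return string(b[:j])
}

func main() {
	fmt.Println(demangle("runtime_1pprof.runtime__setProfLabel"))
	// Output: runtime/pprof.runtime_setProfLabel
}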