Diffstat (limited to 'libgo/go')
26 files changed, 293 insertions, 113 deletions
diff --git a/libgo/go/cmd/cgo/main.go b/libgo/go/cmd/cgo/main.go index 1238016..8e7567b 100644
--- a/libgo/go/cmd/cgo/main.go
+++ b/libgo/go/cmd/cgo/main.go
@@ -229,6 +229,8 @@ var exportHeader = flag.String("exportheader", "", "where to write export header
 var gccgo = flag.Bool("gccgo", false, "generate files for use with gccgo")
 var gccgoprefix = flag.String("gccgoprefix", "", "-fgo-prefix option used with gccgo")
 var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used with gccgo")
+var gccgoMangleCheckDone bool
+var gccgoNewmanglingInEffect bool
 var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code")
 var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code")
 var goarch, goos string
diff --git a/libgo/go/cmd/cgo/out.go b/libgo/go/cmd/cgo/out.go index 10d4b74..60b2c11 100644
--- a/libgo/go/cmd/cgo/out.go
+++ b/libgo/go/cmd/cgo/out.go
@@ -15,7 +15,9 @@ import (
 	"go/printer"
 	"go/token"
 	"io"
+	"io/ioutil"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"regexp"
 	"sort"
@@ -1191,12 +1193,91 @@ func (p *Package) writeExportHeader(fgcch io.Writer) {
 	fmt.Fprintf(fgcch, "%s\n", p.gccExportHeaderProlog())
 }
 
-// Return the package prefix when using gccgo.
-func (p *Package) gccgoSymbolPrefix() string {
-	if !*gccgo {
-		return ""
+// gccgoUsesNewMangling reports whether gccgo uses the new collision-free
+// package-path mangling scheme (see determineGccgoManglingScheme for more
+// info).
+func gccgoUsesNewMangling() bool {
+	if !gccgoMangleCheckDone {
+		gccgoNewmanglingInEffect = determineGccgoManglingScheme()
+		gccgoMangleCheckDone = true
+	}
+	return gccgoNewmanglingInEffect
+}
+
+const mangleCheckCode = `
+package läufer
+func Run(x int) int {
+	return 1
+}
+`
+
+// determineGccgoManglingScheme performs a runtime test to see which
+// flavor of package-path mangling gccgo is using. Older versions of
+// gccgo use a simple mangling scheme under which distinct package
+// paths can collide (mangle to the same string). More recent versions
+// of gccgo use a new mangler that avoids these collisions. The return
+// value reports whether gccgo uses the new mangling.
+func determineGccgoManglingScheme() bool {
+
+	// Emit a small Go file for gccgo to compile.
+	filepat := "*_gccgo_manglecheck.go"
+	var f *os.File
+	var err error
+	if f, err = ioutil.TempFile(*objDir, filepat); err != nil {
+		fatalf("%v", err)
+	}
+	gofilename := f.Name()
+	defer os.Remove(gofilename)
+
+	if err = ioutil.WriteFile(gofilename, []byte(mangleCheckCode), 0666); err != nil {
+		fatalf("%v", err)
+	}
+
+	// Compile with gccgo, capturing generated assembly.
+	gccgocmd := os.Getenv("GCCGO")
+	if gccgocmd == "" {
+		gpath, gerr := exec.LookPath("gccgo")
+		if gerr != nil {
+			fatalf("unable to locate gccgo: %v", gerr)
+		}
+		gccgocmd = gpath
+	}
+	cmd := exec.Command(gccgocmd, "-S", "-o", "-", gofilename)
+	buf, cerr := cmd.CombinedOutput()
+	if cerr != nil {
+		fatalf("%v", cerr)
+	}
+
+	// New mangling: expect go.l..u00e4ufer.Run
+	// Old mangling: expect go.l__ufer.Run
+	return regexp.MustCompile(`go\.l\.\.u00e4ufer\.Run`).Match(buf)
+}
+
+// gccgoPkgpathToSymbolNew converts a package path to a gccgo-style
+// package symbol.
+func gccgoPkgpathToSymbolNew(ppath string) string {
+	bsl := []byte{}
+	changed := false
+	for _, c := range []byte(ppath) {
+		switch {
+		case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z',
+			'0' <= c && c <= '9', c == '_':
+			bsl = append(bsl, c)
+		default:
+			changed = true
+			encbytes := []byte(fmt.Sprintf("..z%02x", c))
+			bsl = append(bsl, encbytes...)
+		}
+	}
+	if !changed {
+		return ppath
 	}
+	return string(bsl)
+}
 
+// gccgoPkgpathToSymbolOld converts a package path to a gccgo-style
+// package symbol using the older mangling scheme.
+func gccgoPkgpathToSymbolOld(ppath string) string {
 	clean := func(r rune) rune {
 		switch {
 		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z',
@@ -1205,14 +1286,32 @@ func (p *Package) gccgoSymbolPrefix() string {
 		}
 		return '_'
 	}
+	return strings.Map(clean, ppath)
+}
+
+// gccgoPkgpathToSymbol converts a package path to a mangled package-path
+// symbol.
+func gccgoPkgpathToSymbol(ppath string) string {
+	if gccgoUsesNewMangling() {
+		return gccgoPkgpathToSymbolNew(ppath)
+	} else {
+		return gccgoPkgpathToSymbolOld(ppath)
+	}
+}
+
+// Return the package prefix when using gccgo.
+func (p *Package) gccgoSymbolPrefix() string {
+	if !*gccgo {
+		return ""
+	}
 
 	if *gccgopkgpath != "" {
-		return strings.Map(clean, *gccgopkgpath)
+		return gccgoPkgpathToSymbol(*gccgopkgpath)
 	}
 	if *gccgoprefix == "" && p.PackageName == "main" {
 		return "main"
 	}
-	prefix := strings.Map(clean, *gccgoprefix)
+	prefix := gccgoPkgpathToSymbol(*gccgoprefix)
 	if prefix == "" {
 		prefix = "go"
 	}
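The collision that the runtime check above guards against is easiest to see side by side. The following standalone sketch is not part of the patch: oldMangle and newMangle are hypothetical stand-ins for the two gccgoPkgpathToSymbol variants, reduced to their core logic.

package main

import (
	"fmt"
	"strings"
)

// oldMangle mimics the pre-existing scheme: any rune outside
// [A-Za-z0-9] collapses to '_', so different paths can collide.
func oldMangle(ppath string) string {
	return strings.Map(func(r rune) rune {
		switch {
		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z',
			'0' <= r && r <= '9':
			return r
		}
		return '_'
	}, ppath)
}

// newMangle mimics the collision-free scheme: each special byte
// becomes "..z" plus its two-digit hex code ('/' is 0x2f).
func newMangle(ppath string) string {
	var sb strings.Builder
	for _, c := range []byte(ppath) {
		switch {
		case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z',
			'0' <= c && c <= '9', c == '_':
			sb.WriteByte(c)
		default:
			fmt.Fprintf(&sb, "..z%02x", c)
		}
	}
	return sb.String()
}

func main() {
	fmt.Println(oldMangle("internal/poll")) // internal_poll
	fmt.Println(oldMangle("internal_poll")) // internal_poll (collision!)
	fmt.Println(newMangle("internal/poll")) // internal..z2fpoll
	fmt.Println(newMangle("internal_poll")) // internal_poll (distinct)
}

Under the old scheme the paths internal/poll and internal_poll both mangle to internal_poll; the "..z" escape keeps them distinct, which is why every //go:linkname target and __asm__ symbol in the hunks below changes from "_" to "..z2f".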
diff --git a/libgo/go/internal/bytealg/bytealg.c b/libgo/go/internal/bytealg/bytealg.c index 988dfaa..1ee1ff6 100644 --- a/libgo/go/internal/bytealg/bytealg.c +++ b/libgo/go/internal/bytealg/bytealg.c @@ -38,7 +38,7 @@ static const void *goMemmem(const void *in, size_t inl, const void *s, size_t sl #endif intgo Compare(struct __go_open_array, struct __go_open_array) - __asm__(GOSYM_PREFIX "internal_bytealg.Compare") + __asm__(GOSYM_PREFIX "internal..z2fbytealg.Compare") __attribute__((no_split_stack)); intgo Compare(struct __go_open_array a, struct __go_open_array b) @@ -67,7 +67,7 @@ intgo Compare(struct __go_open_array a, struct __go_open_array b) } _Bool Equal(struct __go_open_array, struct __go_open_array) - __asm__(GOSYM_PREFIX "internal_bytealg.Equal") + __asm__(GOSYM_PREFIX "internal..z2fbytealg.Equal") __attribute__((no_split_stack)); _Bool Equal(struct __go_open_array a, struct __go_open_array b) @@ -82,7 +82,7 @@ _Bool Equal(struct __go_open_array a, struct __go_open_array b) } intgo IndexByte(struct __go_open_array, byte) - __asm__(GOSYM_PREFIX "internal_bytealg.IndexByte") + __asm__(GOSYM_PREFIX "internal..z2fbytealg.IndexByte") __attribute__((no_split_stack)); intgo IndexByte(struct __go_open_array b, byte c) @@ -98,7 +98,7 @@ intgo IndexByte(struct __go_open_array b, byte c) intgo IndexByteString(String, byte) - __asm__(GOSYM_PREFIX "internal_bytealg.IndexByteString") + __asm__(GOSYM_PREFIX "internal..z2fbytealg.IndexByteString") __attribute__((no_split_stack)); intgo IndexByteString(String s, byte c) @@ -113,7 +113,7 @@ intgo IndexByteString(String s, byte c) } intgo Index(struct __go_open_array, struct __go_open_array) - __asm__(GOSYM_PREFIX "internal_bytealg.Index") + __asm__(GOSYM_PREFIX "internal..z2fbytealg.Index") __attribute__((no_split_stack)); intgo Index(struct __go_open_array a, struct __go_open_array b) @@ -128,7 +128,7 @@ intgo Index(struct __go_open_array a, struct __go_open_array b) } intgo IndexString(String, String) - __asm__(GOSYM_PREFIX "internal_bytealg.IndexString") +
__asm__(GOSYM_PREFIX "internal..z2fbytealg.IndexString") __attribute__((no_split_stack)); intgo IndexString(String a, String b) diff --git a/libgo/go/internal/cpu/cpu_gccgo.c b/libgo/go/internal/cpu/cpu_gccgo.c index 1d5b492..f9ebd8b 100644 --- a/libgo/go/internal/cpu/cpu_gccgo.c +++ b/libgo/go/internal/cpu/cpu_gccgo.c @@ -21,7 +21,7 @@ struct cpuid_ret { }; struct cpuid_ret cpuid(uint32_t, uint32_t) - __asm__(GOSYM_PREFIX "internal_cpu.cpuid") + __asm__(GOSYM_PREFIX "internal..z2fcpu.cpuid") __attribute__((no_split_stack)); struct cpuid_ret cpuid(uint32_t eaxArg, uint32_t ecxArg) { @@ -45,7 +45,7 @@ struct xgetbv_ret { }; struct xgetbv_ret xgetbv(void) - __asm__(GOSYM_PREFIX "internal_cpu.xgetbv") + __asm__(GOSYM_PREFIX "internal..z2fcpu.xgetbv") __attribute__((no_split_stack)); #pragma GCC push_options diff --git a/libgo/go/runtime/atomic_pointer.go b/libgo/go/runtime/atomic_pointer.go index 2d023d3..03d8d6a 100644 --- a/libgo/go/runtime/atomic_pointer.go +++ b/libgo/go/runtime/atomic_pointer.go @@ -52,10 +52,10 @@ func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { // We cannot just call the runtime routines, because the race detector expects // to be able to intercept the sync/atomic forms but not the runtime forms. -//go:linkname sync_atomic_StoreUintptr sync_atomic.StoreUintptr +//go:linkname sync_atomic_StoreUintptr sync..z2fatomic.StoreUintptr func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr) -//go:linkname sync_atomic_StorePointer sync_atomic.StorePointer +//go:linkname sync_atomic_StorePointer sync..z2fatomic.StorePointer //go:nosplit func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { if writeBarrier.enabled { @@ -64,10 +64,10 @@ func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new)) } -//go:linkname sync_atomic_SwapUintptr sync_atomic.SwapUintptr +//go:linkname sync_atomic_SwapUintptr sync..z2fatomic.SwapUintptr func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr -//go:linkname sync_atomic_SwapPointer sync_atomic.SwapPointer +//go:linkname sync_atomic_SwapPointer sync..z2fatomic.SwapPointer //go:nosplit func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer { if writeBarrier.enabled { @@ -77,10 +77,10 @@ func sync_atomic_SwapPointer(ptr *unsafe.Pointer, new unsafe.Pointer) unsafe.Poi return old } -//go:linkname sync_atomic_CompareAndSwapUintptr sync_atomic.CompareAndSwapUintptr +//go:linkname sync_atomic_CompareAndSwapUintptr sync..z2fatomic.CompareAndSwapUintptr func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool -//go:linkname sync_atomic_CompareAndSwapPointer sync_atomic.CompareAndSwapPointer +//go:linkname sync_atomic_CompareAndSwapPointer sync..z2fatomic.CompareAndSwapPointer //go:nosplit func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { if writeBarrier.enabled { diff --git a/libgo/go/runtime/cpuprof.go b/libgo/go/runtime/cpuprof.go index b1a7c3b..e7cf1b4 100644 --- a/libgo/go/runtime/cpuprof.go +++ b/libgo/go/runtime/cpuprof.go @@ -186,7 +186,7 @@ func CPUProfile() []byte { panic("CPUProfile no longer available") } -//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime_pprof.runtime_cyclesPerSecond +//go:linkname runtime_pprof_runtime_cyclesPerSecond runtime..z2fpprof.runtime_cyclesPerSecond func runtime_pprof_runtime_cyclesPerSecond() int64 { return tickspersecond() } @@ -197,7 +197,7 @@ func runtime_pprof_runtime_cyclesPerSecond() 
int64 { // on has been returned, readProfile returns eof=true. // The caller must save the returned data and tags before calling readProfile again. // -//go:linkname runtime_pprof_readProfile runtime_pprof.readProfile +//go:linkname runtime_pprof_readProfile runtime..z2fpprof.readProfile func runtime_pprof_readProfile() ([]uint64, []unsafe.Pointer, bool) { lock(&cpuprof.lock) log := cpuprof.log diff --git a/libgo/go/runtime/debug/stack_test.go b/libgo/go/runtime/debug/stack_test.go index 67931d1..309eddd 100644 --- a/libgo/go/runtime/debug/stack_test.go +++ b/libgo/go/runtime/debug/stack_test.go @@ -51,10 +51,10 @@ func TestStack(t *testing.T) { n++ } n++ - frame("stack.go", "runtime_debug.Stack") + frame("stack.go", "debug.Stack") frame("stack_test.go", "ptrmethod") frame("stack_test.go", "method") - frame("stack_test.go", "runtime_debug_test.TestStack") + frame("stack_test.go", "test.TestStack") frame("testing.go", "") } diff --git a/libgo/go/runtime/heapdump.go b/libgo/go/runtime/heapdump.go index e92ea39..5ebebf6 100644 --- a/libgo/go/runtime/heapdump.go +++ b/libgo/go/runtime/heapdump.go @@ -16,7 +16,7 @@ import ( "unsafe" ) -//go:linkname runtime_debug_WriteHeapDump runtime_debug.WriteHeapDump +//go:linkname runtime_debug_WriteHeapDump runtime..z2fdebug.WriteHeapDump func runtime_debug_WriteHeapDump(fd uintptr) { stopTheWorld("write heap dump") diff --git a/libgo/go/runtime/internal/atomic/atomic.c b/libgo/go/runtime/internal/atomic/atomic.c index 24820f2..b87fae9 100644 --- a/libgo/go/runtime/internal/atomic/atomic.c +++ b/libgo/go/runtime/internal/atomic/atomic.c @@ -7,7 +7,7 @@ #include "runtime.h" uint32_t Load (uint32_t *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Load") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load") __attribute__ ((no_split_stack)); uint32_t @@ -17,7 +17,7 @@ Load (uint32_t *ptr) } void *Loadp (void *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Loadp") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loadp") __attribute__ ((no_split_stack)); void * @@ -27,7 +27,7 @@ Loadp (void *ptr) } uint64_t Load64 (uint64_t *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Load64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Load64") __attribute__ ((no_split_stack)); uint64_t @@ -39,7 +39,7 @@ Load64 (uint64_t *ptr) } uintptr_t Loaduintptr (uintptr_t *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Loaduintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loaduintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -49,7 +49,7 @@ Loaduintptr (uintptr_t *ptr) } uintgo Loaduint (uintgo *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Loaduint") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loaduint") __attribute__ ((no_split_stack)); uintgo @@ -59,7 +59,7 @@ Loaduint (uintgo *ptr) } int64_t Loadint64 (int64_t *ptr) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Loadint64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Loadint64") __attribute__ ((no_split_stack)); int64_t @@ -71,7 +71,7 @@ Loadint64 (int64_t *ptr) } uint32_t Xadd (uint32_t *ptr, int32_t delta) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xadd") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xadd") __attribute__ ((no_split_stack)); uint32_t @@ -81,7 +81,7 @@ Xadd (uint32_t *ptr, int32_t delta) } uint64_t Xadd64 (uint64_t *ptr, int64_t delta) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xadd64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xadd64") __attribute__ 
((no_split_stack)); uint64_t @@ -93,7 +93,7 @@ Xadd64 (uint64_t *ptr, int64_t delta) } uintptr_t Xadduintptr (uintptr_t *ptr, uintptr_t delta) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xadduintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xadduintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -103,7 +103,7 @@ Xadduintptr (uintptr_t *ptr, uintptr_t delta) } int64_t Xaddint64 (int64_t *ptr, int64_t delta) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xaddint64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xaddint64") __attribute__ ((no_split_stack)); int64_t @@ -115,7 +115,7 @@ Xaddint64 (int64_t *ptr, int64_t delta) } uint32_t Xchg (uint32_t *ptr, uint32_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xchg") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchg") __attribute__ ((no_split_stack)); uint32_t @@ -125,7 +125,7 @@ Xchg (uint32_t *ptr, uint32_t new) } uint64_t Xchg64 (uint64_t *ptr, uint64_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xchg64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchg64") __attribute__ ((no_split_stack)); uint64_t @@ -137,7 +137,7 @@ Xchg64 (uint64_t *ptr, uint64_t new) } uintptr_t Xchguintptr (uintptr_t *ptr, uintptr_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Xchguintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Xchguintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -147,7 +147,7 @@ Xchguintptr (uintptr_t *ptr, uintptr_t new) } void And8 (uint8_t *ptr, uint8_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.And8") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.And8") __attribute__ ((no_split_stack)); void @@ -157,7 +157,7 @@ And8 (uint8_t *ptr, uint8_t val) } void Or8 (uint8_t *ptr, uint8_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Or8") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Or8") __attribute__ ((no_split_stack)); void @@ -167,7 +167,7 @@ Or8 (uint8_t *ptr, uint8_t val) } _Bool Cas (uint32_t *ptr, uint32_t old, uint32_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Cas") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Cas") __attribute__ ((no_split_stack)); _Bool @@ -177,7 +177,7 @@ Cas (uint32_t *ptr, uint32_t old, uint32_t new) } _Bool Cas64 (uint64_t *ptr, uint64_t old, uint64_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Cas64") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Cas64") __attribute__ ((no_split_stack)); _Bool @@ -189,7 +189,7 @@ Cas64 (uint64_t *ptr, uint64_t old, uint64_t new) } _Bool Casp1 (void **ptr, void *old, void *new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Casp1") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Casp1") __attribute__ ((no_split_stack)); _Bool @@ -199,7 +199,7 @@ Casp1 (void **ptr, void *old, void *new) } _Bool Casuintptr (uintptr_t *ptr, uintptr_t old, uintptr_t new) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Casuintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Casuintptr") __attribute__ ((no_split_stack)); _Bool @@ -209,7 +209,7 @@ Casuintptr (uintptr_t *ptr, uintptr_t old, uintptr_t new) } void Store (uint32_t *ptr, uint32_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Store") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Store") __attribute__ ((no_split_stack)); void @@ -219,7 +219,7 @@ Store (uint32_t *ptr, uint32_t val) } void Store64 (uint64_t *ptr, uint64_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Store64") + __asm__ (GOSYM_PREFIX 
"runtime..z2finternal..z2fatomic.Store64") __attribute__ ((no_split_stack)); void @@ -231,7 +231,7 @@ Store64 (uint64_t *ptr, uint64_t val) } void Storeuintptr (uintptr_t *ptr, uintptr_t val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.Storeuintptr") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.Storeuintptr") __attribute__ ((no_split_stack)); void @@ -241,7 +241,7 @@ Storeuintptr (uintptr_t *ptr, uintptr_t val) } void StorepNoWB (void *ptr, void *val) - __asm__ (GOSYM_PREFIX "runtime_internal_atomic.StorepNoWB") + __asm__ (GOSYM_PREFIX "runtime..z2finternal..z2fatomic.StorepNoWB") __attribute__ ((no_split_stack)); void diff --git a/libgo/go/runtime/mgc.go b/libgo/go/runtime/mgc.go index 4ef982d..de84084 100644 --- a/libgo/go/runtime/mgc.go +++ b/libgo/go/runtime/mgc.go @@ -219,7 +219,7 @@ func gcenable() { memstats.enablegc = true // now that runtime is initialized, GC is okay } -//go:linkname setGCPercent runtime_debug.setGCPercent +//go:linkname setGCPercent runtime..z2fdebug.setGCPercent func setGCPercent(in int32) (out int32) { lock(&mheap_.lock) out = gcpercent diff --git a/libgo/go/runtime/mheap.go b/libgo/go/runtime/mheap.go index 65622f4..eb98083 100644 --- a/libgo/go/runtime/mheap.go +++ b/libgo/go/runtime/mheap.go @@ -1165,7 +1165,7 @@ func (h *mheap) scavenge(k int32, now, limit uint64) { } } -//go:linkname runtime_debug_freeOSMemory runtime_debug.freeOSMemory +//go:linkname runtime_debug_freeOSMemory runtime..z2fdebug.freeOSMemory func runtime_debug_freeOSMemory() { GC() systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) }) diff --git a/libgo/go/runtime/mstats.go b/libgo/go/runtime/mstats.go index f54ce9d..8aac850 100644 --- a/libgo/go/runtime/mstats.go +++ b/libgo/go/runtime/mstats.go @@ -477,7 +477,7 @@ func readmemstats_m(stats *MemStats) { stats.StackSys += stats.StackInuse } -//go:linkname readGCStats runtime_debug.readGCStats +//go:linkname readGCStats runtime..z2fdebug.readGCStats func readGCStats(pauses *[]uint64) { systemstack(func() { readGCStats_m(pauses) diff --git a/libgo/go/runtime/net_plan9.go b/libgo/go/runtime/net_plan9.go index 77ae8c6..907c319 100644 --- a/libgo/go/runtime/net_plan9.go +++ b/libgo/go/runtime/net_plan9.go @@ -8,12 +8,12 @@ import ( _ "unsafe" ) -//go:linkname runtime_ignoreHangup internal_poll.runtime_ignoreHangup +//go:linkname runtime_ignoreHangup internal..z2fpoll.runtime_ignoreHangup func runtime_ignoreHangup() { getg().m.ignoreHangup = true } -//go:linkname runtime_unignoreHangup internal_poll.runtime_unignoreHangup +//go:linkname runtime_unignoreHangup internal..z2fpoll.runtime_unignoreHangup func runtime_unignoreHangup(sig string) { getg().m.ignoreHangup = false } diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go index ab3d14d..6016b7d 100644 --- a/libgo/go/runtime/netpoll.go +++ b/libgo/go/runtime/netpoll.go @@ -85,7 +85,7 @@ var ( netpollWaiters uint32 ) -//go:linkname poll_runtime_pollServerInit internal_poll.runtime_pollServerInit +//go:linkname poll_runtime_pollServerInit internal..z2fpoll.runtime_pollServerInit func poll_runtime_pollServerInit() { netpollinit() atomic.Store(&netpollInited, 1) @@ -95,7 +95,7 @@ func netpollinited() bool { return atomic.Load(&netpollInited) != 0 } -//go:linkname poll_runtime_pollServerDescriptor internal_poll.runtime_pollServerDescriptor +//go:linkname poll_runtime_pollServerDescriptor internal..z2fpoll.runtime_pollServerDescriptor // poll_runtime_pollServerDescriptor returns the descriptor being used, // or ^uintptr(0) if the system does not use a poll 
descriptor. @@ -103,7 +103,7 @@ func poll_runtime_pollServerDescriptor() uintptr { return netpolldescriptor() } -//go:linkname poll_runtime_pollOpen internal_poll.runtime_pollOpen +//go:linkname poll_runtime_pollOpen internal..z2fpoll.runtime_pollOpen func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) { pd := pollcache.alloc() lock(&pd.lock) @@ -127,7 +127,7 @@ func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) { return pd, int(errno) } -//go:linkname poll_runtime_pollClose internal_poll.runtime_pollClose +//go:linkname poll_runtime_pollClose internal..z2fpoll.runtime_pollClose func poll_runtime_pollClose(pd *pollDesc) { if !pd.closing { throw("runtime: close polldesc w/o unblock") @@ -149,7 +149,7 @@ func (c *pollCache) free(pd *pollDesc) { unlock(&c.lock) } -//go:linkname poll_runtime_pollReset internal_poll.runtime_pollReset +//go:linkname poll_runtime_pollReset internal..z2fpoll.runtime_pollReset func poll_runtime_pollReset(pd *pollDesc, mode int) int { err := netpollcheckerr(pd, int32(mode)) if err != 0 { @@ -163,7 +163,7 @@ func poll_runtime_pollReset(pd *pollDesc, mode int) int { return 0 } -//go:linkname poll_runtime_pollWait internal_poll.runtime_pollWait +//go:linkname poll_runtime_pollWait internal..z2fpoll.runtime_pollWait func poll_runtime_pollWait(pd *pollDesc, mode int) int { err := netpollcheckerr(pd, int32(mode)) if err != 0 { @@ -185,7 +185,7 @@ func poll_runtime_pollWait(pd *pollDesc, mode int) int { return 0 } -//go:linkname poll_runtime_pollWaitCanceled internal_poll.runtime_pollWaitCanceled +//go:linkname poll_runtime_pollWaitCanceled internal..z2fpoll.runtime_pollWaitCanceled func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) { // This function is used only on windows after a failed attempt to cancel // a pending async IO operation. Wait for ioready, ignore closing or timeouts. 
@@ -193,7 +193,7 @@ func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) { } } -//go:linkname poll_runtime_pollSetDeadline internal_poll.runtime_pollSetDeadline +//go:linkname poll_runtime_pollSetDeadline internal..z2fpoll.runtime_pollSetDeadline func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) { lock(&pd.lock) if pd.closing { @@ -263,7 +263,7 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) { } } -//go:linkname poll_runtime_pollUnblock internal_poll.runtime_pollUnblock +//go:linkname poll_runtime_pollUnblock internal..z2fpoll.runtime_pollUnblock func poll_runtime_pollUnblock(pd *pollDesc) { lock(&pd.lock) if pd.closing { diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go index 5d77a1d..f428827 100644 --- a/libgo/go/runtime/pprof/mprof_test.go +++ b/libgo/go/runtime/pprof/mprof_test.go @@ -87,19 +87,19 @@ func TestMemoryProfiler(t *testing.T) { fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f x]+ # 0x[0-9,a-f]+ pprof\.allocatePersistent1K\+0x[0-9,a-f]+ .*/mprof_test\.go:40 -# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:74 +# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:74 `, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun), fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f x]+ # 0x[0-9,a-f]+ pprof\.allocateTransient1M\+0x[0-9,a-f]+ .*/mprof_test.go:21 -# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:72 +# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:72 `, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun), // This should start with "0: 0" but gccgo's imprecise // GC means that sometimes the value is not collected. 
fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+ # 0x[0-9,a-f]+ pprof\.allocateTransient2M\+0x[0-9,a-f]+ .*/mprof_test.go:27 -# 0x[0-9,a-f]+ runtime_pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:73 +# 0x[0-9,a-f]+ runtime/pprof\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:73 `, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun), // This should start with "0: 0" but gccgo's imprecise diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go index 77d379b..bb16924 100644 --- a/libgo/go/runtime/proc.go +++ b/libgo/go/runtime/proc.go @@ -4670,7 +4670,7 @@ func runqsteal(_p_, p2 *p, stealRunNextG bool) *g { return gp } -//go:linkname setMaxThreads runtime_debug.setMaxThreads +//go:linkname setMaxThreads runtime..z2fdebug.setMaxThreads func setMaxThreads(in int) (out int) { lock(&sched.lock) out = int(sched.maxmcount) @@ -4716,13 +4716,13 @@ func sync_runtime_procUnpin() { procUnpin() } -//go:linkname sync_atomic_runtime_procPin sync_atomic.runtime_procPin +//go:linkname sync_atomic_runtime_procPin sync..z2fatomic.runtime_procPin //go:nosplit func sync_atomic_runtime_procPin() int { return procPin() } -//go:linkname sync_atomic_runtime_procUnpin sync_atomic.runtime_procUnpin +//go:linkname sync_atomic_runtime_procUnpin sync..z2fatomic.runtime_procUnpin //go:nosplit func sync_atomic_runtime_procUnpin() { procUnpin() diff --git a/libgo/go/runtime/proflabel.go b/libgo/go/runtime/proflabel.go index ff73fe4..fc655cc 100644 --- a/libgo/go/runtime/proflabel.go +++ b/libgo/go/runtime/proflabel.go @@ -8,7 +8,7 @@ import "unsafe" var labelSync uintptr -//go:linkname runtime_setProfLabel runtime_pprof.runtime_setProfLabel +//go:linkname runtime_setProfLabel runtime..z2fpprof.runtime_setProfLabel func runtime_setProfLabel(labels unsafe.Pointer) { // Introduce race edge for read-back via profile. // This would more properly use &getg().labels as the sync address, @@ -34,7 +34,7 @@ func runtime_setProfLabel(labels unsafe.Pointer) { getg().labels = labels } -//go:linkname runtime_getProfLabel runtime_pprof.runtime_getProfLabel +//go:linkname runtime_getProfLabel runtime..z2fpprof.runtime_getProfLabel func runtime_getProfLabel() unsafe.Pointer { return getg().labels } diff --git a/libgo/go/runtime/rdebug.go b/libgo/go/runtime/rdebug.go index 76535a9..358df11 100644 --- a/libgo/go/runtime/rdebug.go +++ b/libgo/go/runtime/rdebug.go @@ -11,14 +11,14 @@ import _ "unsafe" // for go:linkname // maxstacksize. 
var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real -//go:linkname setMaxStack runtime_debug.setMaxStack +//go:linkname setMaxStack runtime..z2fdebug.setMaxStack func setMaxStack(in int) (out int) { out = int(maxstacksize) maxstacksize = uintptr(in) return out } -//go:linkname setPanicOnFault runtime_debug.setPanicOnFault +//go:linkname setPanicOnFault runtime..z2fdebug.setPanicOnFault func setPanicOnFault(new bool) (old bool) { _g_ := getg() old = _g_.paniconfault diff --git a/libgo/go/runtime/runtime1.go b/libgo/go/runtime/runtime1.go index 8b1b0a0..050f180 100644 --- a/libgo/go/runtime/runtime1.go +++ b/libgo/go/runtime/runtime1.go @@ -413,7 +413,7 @@ func parsedebugvars() { traceback_env = traceback_cache } -//go:linkname setTraceback runtime_debug.SetTraceback +//go:linkname setTraceback runtime..z2fdebug.SetTraceback func setTraceback(level string) { var t uint32 switch level { diff --git a/libgo/go/runtime/sema.go b/libgo/go/runtime/sema.go index cb7d3cd..273e8aa 100644 --- a/libgo/go/runtime/sema.go +++ b/libgo/go/runtime/sema.go @@ -56,7 +56,7 @@ func sync_runtime_Semacquire(addr *uint32) { semacquire1(addr, false, semaBlockProfile) } -//go:linkname poll_runtime_Semacquire internal_poll.runtime_Semacquire +//go:linkname poll_runtime_Semacquire internal..z2fpoll.runtime_Semacquire func poll_runtime_Semacquire(addr *uint32) { semacquire1(addr, false, semaBlockProfile) } @@ -71,7 +71,7 @@ func sync_runtime_SemacquireMutex(addr *uint32, lifo bool) { semacquire1(addr, lifo, semaBlockProfile|semaMutexProfile) } -//go:linkname poll_runtime_Semrelease internal_poll.runtime_Semrelease +//go:linkname poll_runtime_Semrelease internal..z2fpoll.runtime_Semrelease func poll_runtime_Semrelease(addr *uint32) { semrelease(addr) } diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go index cf926a9..1a29b20 100644 --- a/libgo/go/runtime/sigqueue.go +++ b/libgo/go/runtime/sigqueue.go @@ -117,7 +117,7 @@ Send: // Called to receive the next queued signal. // Must only be called from a single goroutine at a time. -//go:linkname signal_recv os_signal.signal_recv +//go:linkname signal_recv os..z2fsignal.signal_recv func signal_recv() uint32 { for { // Serve any signals from local copy. @@ -161,7 +161,7 @@ func signal_recv() uint32 { // the signal(s) in question, and here we are just waiting to make sure // that all the signals have been delivered to the user channels // by the os/signal package. -//go:linkname signalWaitUntilIdle os_signal.signalWaitUntilIdle +//go:linkname signalWaitUntilIdle os..z2fsignal.signalWaitUntilIdle func signalWaitUntilIdle() { // Although the signals we care about have been removed from // sig.wanted, it is possible that another thread has received @@ -181,7 +181,7 @@ func signalWaitUntilIdle() { } // Must only be called from a single goroutine at a time. -//go:linkname signal_enable os_signal.signal_enable +//go:linkname signal_enable os..z2fsignal.signal_enable func signal_enable(s uint32) { if !sig.inuse { // The first call to signal_enable is for us @@ -208,7 +208,7 @@ func signal_enable(s uint32) { } // Must only be called from a single goroutine at a time. -//go:linkname signal_disable os_signal.signal_disable +//go:linkname signal_disable os..z2fsignal.signal_disable func signal_disable(s uint32) { if s >= uint32(len(sig.wanted)*32) { return @@ -221,7 +221,7 @@ func signal_disable(s uint32) { } // Must only be called from a single goroutine at a time. 
-//go:linkname signal_ignore os_signal.signal_ignore +//go:linkname signal_ignore os..z2fsignal.signal_ignore func signal_ignore(s uint32) { if s >= uint32(len(sig.wanted)*32) { return @@ -248,7 +248,7 @@ func sigInitIgnored(s uint32) { } // Checked by signal handlers. -//go:linkname signal_ignored os_signal.signal_ignored +//go:linkname signal_ignored os..z2fsignal.signal_ignored func signal_ignored(s uint32) bool { i := atomic.Load(&sig.ignored[s/32]) return i&(1<<(s&31)) != 0
diff --git a/libgo/go/runtime/symtab.go b/libgo/go/runtime/symtab.go index 861921c..d137122 100644
--- a/libgo/go/runtime/symtab.go
+++ b/libgo/go/runtime/symtab.go
@@ -83,6 +83,11 @@ func (ci *Frames) Next() (frame Frame, more bool) {
 	if function == "" && file == "" {
 		return Frame{}, more
 	}
+
+	// Demangle function name if needed.
+	function = demangleSymbol(function)
+
+	// Create entry.
 	entry := funcentry(pc - 1)
 	f := &Func{name: function, entry: entry}
@@ -182,6 +187,75 @@ func (f *Func) FileLine(pc uintptr) (file string, line int) {
 	return file, line
 }
 
+func hexval(b byte) uint {
+	if b >= '0' && b <= '9' {
+		return uint(b - '0')
+	}
+	if b >= 'a' && b <= 'f' {
+		return uint(b-'a') + 10
+	}
+	return 0
+}
+
+func hexDigitsToRune(digits []byte, ndig int) rune {
+	result := uint(0)
+	for i := 0; i < ndig; i++ {
+		result <<= uint(4)
+		result |= hexval(digits[i])
+	}
+	return rune(result)
+}
+
+// Perform an in-place decoding on the input byte slice. This looks
+// for "..z" (2 hex digits), "..u" (4 hex digits) and "..U" (8 hex
+// digits) escapes and overwrites each with the UTF-8 encoding of the
+// rune in question. Return value is the number of bytes taken by the result.
+
+func decodeIdentifier(bsl []byte) int {
+	j := 0
+	for i := 0; i < len(bsl); i++ {
+		b := bsl[i]
+
+		if i+1 < len(bsl) && bsl[i] == '.' && bsl[i+1] == '.' {
+			if i+4 < len(bsl) && bsl[i+2] == 'z' {
+				digits := bsl[i+3:]
+				r := hexDigitsToRune(digits, 2)
+				nc := encoderune(bsl[j:], r)
+				j += nc
+				i += 4
+				continue
+			} else if i+6 < len(bsl) && bsl[i+2] == 'u' {
+				digits := bsl[i+3:]
+				r := hexDigitsToRune(digits, 4)
+				nc := encoderune(bsl[j:], r)
+				j += nc
+				i += 6
+				continue
+			} else if i+10 < len(bsl) && bsl[i+2] == 'U' {
+				digits := bsl[i+3:]
+				r := hexDigitsToRune(digits, 8)
+				nc := encoderune(bsl[j:], r)
+				j += nc
+				i += 10
+				continue
+			}
+		}
+		bsl[j] = b
+		j += 1
+	}
+	return j
+}
+
+// Demangle a function symbol. Applies the reverse of go_encode_id()
+// as used in the compiler.
+
+func demangleSymbol(s string) string {
+	bsl := []byte(s)
+	nchars := decodeIdentifier(bsl)
+	bsl = bsl[:nchars]
+	return string(bsl)
+}
+
 // implemented in go-caller.c
 func funcfileline(uintptr, int32) (string, string, int)
 func funcentry(uintptr) uintptr
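The decoding direction used by demangleSymbol above can also be sketched standalone. This is a simplified model, not the patch's code: decode and its hexval helper are hypothetical, handle only the "..z" byte escape, and stand in for the real decodeIdentifier, which additionally handles "..u"/"..U" escapes and reuses the runtime's encoderune. It shows how a mangled traceback symbol becomes readable again:

package main

import "fmt"

// decode reverses the "..zXX" byte escapes produced by the new
// mangler, rewriting in place just as decodeIdentifier does.
func decode(s string) string {
	b := []byte(s)
	out := b[:0] // write index always trails the read index, so this is safe
	for i := 0; i < len(b); i++ {
		if i+4 < len(b) && b[i] == '.' && b[i+1] == '.' && b[i+2] == 'z' {
			out = append(out, hexval(b[i+3])<<4|hexval(b[i+4]))
			i += 4 // skip the rest of "..zXX"; the loop's i++ covers the first byte
			continue
		}
		out = append(out, b[i])
	}
	return string(out)
}

func hexval(c byte) byte {
	switch {
	case c >= '0' && c <= '9':
		return c - '0'
	case c >= 'a' && c <= 'f':
		return c - 'a' + 10
	}
	return 0
}

func main() {
	// A mangled symbol as it would appear in a gccgo traceback:
	fmt.Println(decode("runtime..z2finternal..z2fatomic.Load"))
	// Output: runtime/internal/atomic.Load
}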
diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go index a95d95b..ea61baa 100644 --- a/libgo/go/runtime/time.go +++ b/libgo/go/runtime/time.go @@ -441,7 +441,7 @@ func badTimer() { // Entry points for net, time to call nanotime. -//go:linkname poll_runtimeNano internal_poll.runtimeNano +//go:linkname poll_runtimeNano internal..z2fpoll.runtimeNano func poll_runtimeNano() int64 { return nanotime() }
diff --git a/libgo/go/runtime/trace.go b/libgo/go/runtime/trace.go index 7aed9a9..530d5e4 100644 --- a/libgo/go/runtime/trace.go +++ b/libgo/go/runtime/trace.go @@ -1143,7 +1143,7 @@ func traceNextGC() { // To access runtime functions from runtime/trace. // See runtime/trace/annotation.go -//go:linkname trace_userTaskCreate runtime_trace.userTaskCreate +//go:linkname trace_userTaskCreate runtime..z2ftrace.userTaskCreate func trace_userTaskCreate(id, parentID uint64, taskType string) { if !trace.enabled { return @@ -1161,12 +1161,12 @@ func trace_userTaskCreate(id, parentID uint64, taskType string) { traceReleaseBuffer(pid) } -//go:linkname trace_userTaskEnd runtime_trace.userTaskEnd +//go:linkname trace_userTaskEnd runtime..z2ftrace.userTaskEnd func trace_userTaskEnd(id uint64) { traceEvent(traceEvUserTaskEnd, 2, id) } -//go:linkname trace_userRegion runtime_trace.userRegion +//go:linkname trace_userRegion runtime..z2ftrace.userRegion func trace_userRegion(id, mode uint64, name string) { if !trace.enabled { return @@ -1183,7 +1183,7 @@ func trace_userRegion(id, mode uint64, name string) { traceReleaseBuffer(pid) } -//go:linkname trace_userLog runtime_trace.userLog +//go:linkname trace_userLog runtime..z2ftrace.userLog func trace_userLog(id uint64, category, message string) { if !trace.enabled { return
diff --git a/libgo/go/runtime/traceback_gccgo.go b/libgo/go/runtime/traceback_gccgo.go index e97071e..7347cea 100644 --- a/libgo/go/runtime/traceback_gccgo.go +++ b/libgo/go/runtime/traceback_gccgo.go @@ -110,9 +110,14 @@ func showframe(name string, gp *g) bool { } // isExportedRuntime reports whether name is an exported runtime function. -// It is only for runtime functions, so ASCII A-Z is fine. +// It is only for runtime functions, so ASCII A-Z is fine. Here we also check +// for mangled functions from runtime/<...>, which will be prefixed with +// "runtime..z2f". func isExportedRuntime(name string) bool { const n = len("runtime.") + if hasprefix(name, "runtime..z2f") { + return true + } return len(name) > n && name[:n] == "runtime."
&& 'A' <= name[n] && name[n] <= 'Z' } diff --git a/libgo/go/sync/atomic/atomic.c b/libgo/go/sync/atomic/atomic.c index 07a4306..6cc730f 100644 --- a/libgo/go/sync/atomic/atomic.c +++ b/libgo/go/sync/atomic/atomic.c @@ -9,7 +9,7 @@ #include "runtime.h" int32_t SwapInt32 (int32_t *, int32_t) - __asm__ (GOSYM_PREFIX "sync_atomic.SwapInt32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.SwapInt32") __attribute__ ((no_split_stack)); int32_t @@ -19,7 +19,7 @@ SwapInt32 (int32_t *addr, int32_t new) } int64_t SwapInt64 (int64_t *, int64_t) - __asm__ (GOSYM_PREFIX "sync_atomic.SwapInt64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.SwapInt64") __attribute__ ((no_split_stack)); int64_t @@ -31,7 +31,7 @@ SwapInt64 (int64_t *addr, int64_t new) } uint32_t SwapUint32 (uint32_t *, uint32_t) - __asm__ (GOSYM_PREFIX "sync_atomic.SwapUint32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.SwapUint32") __attribute__ ((no_split_stack)); uint32_t @@ -41,7 +41,7 @@ SwapUint32 (uint32_t *addr, uint32_t new) } uint64_t SwapUint64 (uint64_t *, uint64_t) - __asm__ (GOSYM_PREFIX "sync_atomic.SwapUint64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.SwapUint64") __attribute__ ((no_split_stack)); uint64_t @@ -53,7 +53,7 @@ SwapUint64 (uint64_t *addr, uint64_t new) } uintptr_t SwapUintptr (uintptr_t *, uintptr_t) - __asm__ (GOSYM_PREFIX "sync_atomic.SwapUintptr") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.SwapUintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -63,7 +63,7 @@ SwapUintptr (uintptr_t *addr, uintptr_t new) } _Bool CompareAndSwapInt32 (int32_t *, int32_t, int32_t) - __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapInt32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.CompareAndSwapInt32") __attribute__ ((no_split_stack)); _Bool @@ -73,7 +73,7 @@ CompareAndSwapInt32 (int32_t *val, int32_t old, int32_t new) } _Bool CompareAndSwapInt64 (int64_t *, int64_t, int64_t) - __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapInt64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.CompareAndSwapInt64") __attribute__ ((no_split_stack)); _Bool @@ -85,7 +85,7 @@ CompareAndSwapInt64 (int64_t *val, int64_t old, int64_t new) } _Bool CompareAndSwapUint32 (uint32_t *, uint32_t, uint32_t) - __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUint32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.CompareAndSwapUint32") __attribute__ ((no_split_stack)); _Bool @@ -95,7 +95,7 @@ CompareAndSwapUint32 (uint32_t *val, uint32_t old, uint32_t new) } _Bool CompareAndSwapUint64 (uint64_t *, uint64_t, uint64_t) - __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUint64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.CompareAndSwapUint64") __attribute__ ((no_split_stack)); _Bool @@ -107,7 +107,7 @@ CompareAndSwapUint64 (uint64_t *val, uint64_t old, uint64_t new) } _Bool CompareAndSwapUintptr (uintptr_t *, uintptr_t, uintptr_t) - __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUintptr") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.CompareAndSwapUintptr") __attribute__ ((no_split_stack)); _Bool @@ -117,7 +117,7 @@ CompareAndSwapUintptr (uintptr_t *val, uintptr_t old, uintptr_t new) } int32_t AddInt32 (int32_t *, int32_t) - __asm__ (GOSYM_PREFIX "sync_atomic.AddInt32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.AddInt32") __attribute__ ((no_split_stack)); int32_t @@ -127,7 +127,7 @@ AddInt32 (int32_t *val, int32_t delta) } uint32_t AddUint32 (uint32_t *, uint32_t) - __asm__ (GOSYM_PREFIX "sync_atomic.AddUint32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.AddUint32") __attribute__ ((no_split_stack)); uint32_t @@ -137,7 +137,7 @@ AddUint32 (uint32_t *val, uint32_t delta) } int64_t AddInt64 
(int64_t *, int64_t) - __asm__ (GOSYM_PREFIX "sync_atomic.AddInt64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.AddInt64") __attribute__ ((no_split_stack)); int64_t @@ -149,7 +149,7 @@ AddInt64 (int64_t *val, int64_t delta) } uint64_t AddUint64 (uint64_t *, uint64_t) - __asm__ (GOSYM_PREFIX "sync_atomic.AddUint64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.AddUint64") __attribute__ ((no_split_stack)); uint64_t @@ -161,7 +161,7 @@ AddUint64 (uint64_t *val, uint64_t delta) } uintptr_t AddUintptr (uintptr_t *, uintptr_t) - __asm__ (GOSYM_PREFIX "sync_atomic.AddUintptr") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.AddUintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -171,7 +171,7 @@ AddUintptr (uintptr_t *val, uintptr_t delta) } int32_t LoadInt32 (int32_t *addr) - __asm__ (GOSYM_PREFIX "sync_atomic.LoadInt32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.LoadInt32") __attribute__ ((no_split_stack)); int32_t @@ -186,7 +186,7 @@ LoadInt32 (int32_t *addr) } int64_t LoadInt64 (int64_t *addr) - __asm__ (GOSYM_PREFIX "sync_atomic.LoadInt64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.LoadInt64") __attribute__ ((no_split_stack)); int64_t @@ -203,7 +203,7 @@ LoadInt64 (int64_t *addr) } uint32_t LoadUint32 (uint32_t *addr) - __asm__ (GOSYM_PREFIX "sync_atomic.LoadUint32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.LoadUint32") __attribute__ ((no_split_stack)); uint32_t @@ -218,7 +218,7 @@ LoadUint32 (uint32_t *addr) } uint64_t LoadUint64 (uint64_t *addr) - __asm__ (GOSYM_PREFIX "sync_atomic.LoadUint64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.LoadUint64") __attribute__ ((no_split_stack)); uint64_t @@ -235,7 +235,7 @@ LoadUint64 (uint64_t *addr) } uintptr_t LoadUintptr (uintptr_t *addr) - __asm__ (GOSYM_PREFIX "sync_atomic.LoadUintptr") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.LoadUintptr") __attribute__ ((no_split_stack)); uintptr_t @@ -250,7 +250,7 @@ LoadUintptr (uintptr_t *addr) } void *LoadPointer (void **addr) - __asm__ (GOSYM_PREFIX "sync_atomic.LoadPointer") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.LoadPointer") __attribute__ ((no_split_stack)); void * @@ -265,7 +265,7 @@ LoadPointer (void **addr) } void StoreInt32 (int32_t *addr, int32_t val) - __asm__ (GOSYM_PREFIX "sync_atomic.StoreInt32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.StoreInt32") __attribute__ ((no_split_stack)); void @@ -279,7 +279,7 @@ StoreInt32 (int32_t *addr, int32_t val) } void StoreInt64 (int64_t *addr, int64_t val) - __asm__ (GOSYM_PREFIX "sync_atomic.StoreInt64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.StoreInt64") __attribute__ ((no_split_stack)); void @@ -295,7 +295,7 @@ StoreInt64 (int64_t *addr, int64_t val) } void StoreUint32 (uint32_t *addr, uint32_t val) - __asm__ (GOSYM_PREFIX "sync_atomic.StoreUint32") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.StoreUint32") __attribute__ ((no_split_stack)); void @@ -309,7 +309,7 @@ StoreUint32 (uint32_t *addr, uint32_t val) } void StoreUint64 (uint64_t *addr, uint64_t val) - __asm__ (GOSYM_PREFIX "sync_atomic.StoreUint64") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.StoreUint64") __attribute__ ((no_split_stack)); void @@ -325,7 +325,7 @@ StoreUint64 (uint64_t *addr, uint64_t val) } void StoreUintptr (uintptr_t *addr, uintptr_t val) - __asm__ (GOSYM_PREFIX "sync_atomic.StoreUintptr") + __asm__ (GOSYM_PREFIX "sync..z2fatomic.StoreUintptr") __attribute__ ((no_split_stack)); void |
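Finally, the traceback_gccgo.go change above can be exercised in isolation. A standalone sketch of the patched predicate, using strings.HasPrefix in place of the runtime-internal hasprefix:

package main

import (
	"fmt"
	"strings"
)

// isExportedRuntime mirrors the patched check: any symbol from a
// mangled runtime/... subpackage (prefix "runtime..z2f") now counts,
// in addition to runtime.X where X is an ASCII capital. Without the
// prefix check, "runtime..z2fdebug..." would fail the capital-letter
// test because name[len("runtime.")] is '.'.
func isExportedRuntime(name string) bool {
	const n = len("runtime.")
	if strings.HasPrefix(name, "runtime..z2f") {
		return true
	}
	return len(name) > n && name[:n] == "runtime." &&
		'A' <= name[n] && name[n] <= 'Z'
}

func main() {
	fmt.Println(isExportedRuntime("runtime.GC"))                     // true
	fmt.Println(isExportedRuntime("runtime.gopark"))                 // false
	fmt.Println(isExportedRuntime("runtime..z2fdebug.setGCPercent")) // true
}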