Diffstat (limited to 'libgo/go/runtime/stubs.go')
-rw-r--r-- | libgo/go/runtime/stubs.go | 62
1 file changed, 30 insertions, 32 deletions
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index 30d87c4..84fa1c7 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -109,8 +109,22 @@ func memcmp(a, b unsafe.Pointer, size uintptr) int32
 // exported value for testing
 var hashLoad = loadFactor
 
-// in asm_*.s
-func fastrand() uint32
+//go:nosplit
+func fastrand() uint32 {
+	mp := getg().m
+	fr := mp.fastrand
+	mx := uint32(int32(fr)>>31) & 0xa8888eef
+	fr = fr<<1 ^ mx
+	mp.fastrand = fr
+	return fr
+}
+
+//go:nosplit
+func fastrandn(n uint32) uint32 {
+	// This is similar to fastrand() % n, but faster.
+	// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+	return uint32(uint64(fastrand()) * uint64(n) >> 32)
+}
 
 //go:linkname sync_fastrand sync.fastrand
 func sync_fastrand() uint32 { return fastrand() }
@@ -156,7 +170,7 @@ type neverCallThisFunction struct{}
 // This function must never be called directly. Call goexit1 instead.
 // gentraceback assumes that goexit terminates the stack. A direct
 // call on the stack will cause gentraceback to stop walking the stack
-// prematurely and if there are leftover stack barriers it may panic.
+// prematurely and if there is leftover state it may panic.
 func goexit(neverCallThisFunction)
 
 // publicationBarrier performs a store/store barrier (a "publication"
@@ -176,9 +190,6 @@ func goexit(neverCallThisFunction)
 // data dependency ordering.
 func publicationBarrier()
 
-//go:noescape
-func setcallerpc(argp unsafe.Pointer, pc uintptr)
-
 // getcallerpc returns the program counter (PC) of its caller's caller.
 // getcallersp returns the stack pointer (SP) of its caller's caller.
 // For both, the argp must be a pointer to the caller's first function argument.
@@ -213,12 +224,14 @@ func getcallerpc(argp unsafe.Pointer) uintptr
 //go:noescape
 func getcallersp(argp unsafe.Pointer) uintptr
 
+func asmcgocall(fn, arg unsafe.Pointer) int32 {
+	throw("asmcgocall")
+	return 0
+}
+
 // argp used in Defer structs when there is no argp.
 const _NoArgs = ^uintptr(0)
 
-//go:linkname time_now time.now
-func time_now() (sec int64, nsec int32)
-
 //extern __builtin_prefetch
 func prefetch(addr unsafe.Pointer, rw int32, locality int32)
 
@@ -238,13 +251,6 @@ func prefetchnta(addr uintptr) {
 	prefetch(unsafe.Pointer(addr), 0, 0)
 }
 
-// For gccgo, expose this for C callers.
-//go:linkname unixnanotime runtime.unixnanotime
-func unixnanotime() int64 {
-	sec, nsec := time_now()
-	return sec*1e9 + int64(nsec)
-}
-
 // round n up to a multiple of a. a must be a power of 2.
 func round(n, a uintptr) uintptr {
 	return (n + a - 1) &^ (a - 1)
@@ -315,18 +321,6 @@ func entersyscallblock(int32)
 // Here for gccgo until we port mgc.go.
 func GC()
 
-// For gccgo to call from C code.
-//go:linkname acquireWorldsema runtime.acquireWorldsema
-func acquireWorldsema() {
-	semacquire(&worldsema, 0)
-}
-
-// For gccgo to call from C code.
-//go:linkname releaseWorldsema runtime.releaseWorldsema
-func releaseWorldsema() {
-	semrelease(&worldsema)
-}
-
 // For gccgo to call from C code, so that the C code and the Go code
 // can share the memstats variable for now.
 //go:linkname getMstats runtime.getMstats
@@ -436,10 +430,6 @@ func setpagesize(s uintptr) {
 	}
 }
 
-// Temporary for gccgo until we port more of proc.go.
-func sigprofNonGoPC(pc uintptr) {
-}
-
 // Temporary for gccgo until we port mgc.go.
 //go:linkname runtime_m0 runtime.runtime_m0
 func runtime_m0() *m {
@@ -458,3 +448,11 @@ type bitvector struct {
 	n        int32 // # of bits
 	bytedata *uint8
 }
+
+// bool2int returns 0 if x is false or 1 if x is true.
+func bool2int(x bool) int {
+	if x {
+		return 1
+	}
+	return 0
+}
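A note on the fastrandn addition above: it replaces a modulo with Lemire's multiply-shift reduction, mapping a full-range 32-bit random value x into [0, n) via (x * n) >> 32, which avoids a hardware divide. The following is a minimal standalone sketch of that mapping, not part of this patch; it uses math/rand in place of the runtime's per-M fastrand state, and the reduce helper name is purely illustrative.

package main

import (
	"fmt"
	"math/rand"
)

// reduce maps a full-range 32-bit value x into [0, n) without a modulo,
// mirroring the expression used by fastrandn: uint32(uint64(x) * uint64(n) >> 32).
func reduce(x, n uint32) uint32 {
	return uint32(uint64(x) * uint64(n) >> 32)
}

func main() {
	const n = 10
	counts := make([]int, n)
	for i := 0; i < 1000000; i++ {
		counts[reduce(rand.Uint32(), n)]++
	}
	// Each bucket should hold roughly 100000 samples: the reduction spreads
	// values about as uniformly as a plain modulo would, just without the divide.
	fmt.Println(counts)
}

Like fastrand() % n, the multiply-shift form is slightly biased for n that do not divide 2^32 evenly, which is acceptable for the runtime's internal uses.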