author     Ian Lance Taylor <iant@google.com>   2015-01-15 00:27:56 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2015-01-15 00:27:56 +0000
commit     f8d9fa9e80b57f89e7877ce6cad8a3464879009b (patch)
tree       58a1724fee16d2b03c65678c4dd9b50bb97137a9 /libgo/go/runtime
parent     6bd3f109d8d8fa58eeccd6b3504721b4f20c00c2 (diff)
libgo, compiler: Upgrade libgo to Go 1.4, except for runtime.
This upgrades all of libgo other than the runtime package to
the Go 1.4 release. In Go 1.4 much of the runtime was
rewritten into Go. Merging that code will take more time and
will not change the API, so I'm putting it off for now.
There are a few runtime changes anyhow, to accommodate other
packages that rely on minor modifications to the runtime
support.
The compiler changes slightly to add a one-bit flag to each
type descriptor kind that is stored directly in an interface,
which for gccgo is currently only pointer types. Another
one-bit flag (gcprog) is reserved because it is used by the gc
compiler, but gccgo does not currently use it.
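For reference, a minimal sketch of how such flag bits can be packed
into a type descriptor's kind byte. The constant names and the pointer
kind value below follow the upstream (gc) Go 1.4 runtime and are used
here only for illustration; gccgo's actual encoding may differ.

package main

import "fmt"

// Flag bits packed into the high bits of the kind byte, alongside the
// enumerated kind in the low bits (names as in the gc runtime).
const (
	kindDirectIface = 1 << 5 // value stored directly in the interface word
	kindGCProg      = 1 << 6 // reserved: used by the gc compiler, not by gccgo
	kindNoPointers  = 1 << 7
	kindMask        = (1 << 5) - 1 // low five bits hold the kind proper
)

// isDirectIface reports whether a value of this kind is stored directly
// in an interface (for gccgo, currently only pointer types).
func isDirectIface(kind uint8) bool {
	return kind&kindDirectIface != 0
}

func main() {
	const kindPtr = 22 // pointer kind value in the gc runtime's enumeration
	k := uint8(kindPtr | kindDirectIface)
	fmt.Println(k&kindMask, isDirectIface(k)) // prints: 22 true
}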
There is also a new error check in the compiler, for a case I
ran across during testing.
gotools/:
* Makefile.am (go_cmd_go_files): Sort entries. Add generate.go.
* Makefile.in: Rebuild.
From-SVN: r219627
Diffstat (limited to 'libgo/go/runtime')
73 files changed, 11308 insertions, 499 deletions
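As a reading aid before the patch body: the new chan.go below documents
that the compiler lowers a select with a default clause into calls such
as selectnbsend and selectnbrecv. A small, hypothetical user-level
example (not part of the patch) of the forms being lowered:

package main

import "fmt"

func main() {
	c := make(chan int, 1)

	// A select with a default clause is a non-blocking attempt; per the
	// chan.go comments below, this becomes runtime.selectnbsend(c, 1).
	select {
	case c <- 1:
		fmt.Println("sent")
	default:
		fmt.Println("send would block")
	}

	// The receive form becomes runtime.selectnbrecv(&v, c).
	select {
	case v := <-c:
		fmt.Println("received", v)
	default:
		fmt.Println("channel empty")
	}
}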
diff --git a/libgo/go/runtime/arch_386.go b/libgo/go/runtime/arch_386.go new file mode 100644 index 0000000..79d38c7 --- /dev/null +++ b/libgo/go/runtime/arch_386.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +type uintreg uint32 +type intptr int32 // TODO(rsc): remove diff --git a/libgo/go/runtime/arch_amd64.go b/libgo/go/runtime/arch_amd64.go new file mode 100644 index 0000000..270cd7b --- /dev/null +++ b/libgo/go/runtime/arch_amd64.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +type uintreg uint64 +type intptr int64 // TODO(rsc): remove diff --git a/libgo/go/runtime/arch_amd64p32.go b/libgo/go/runtime/arch_amd64p32.go new file mode 100644 index 0000000..5c636ae --- /dev/null +++ b/libgo/go/runtime/arch_amd64p32.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +type uintreg uint64 +type intptr int32 // TODO(rsc): remove diff --git a/libgo/go/runtime/arch_arm.go b/libgo/go/runtime/arch_arm.go new file mode 100644 index 0000000..79d38c7 --- /dev/null +++ b/libgo/go/runtime/arch_arm.go @@ -0,0 +1,8 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +type uintreg uint32 +type intptr int32 // TODO(rsc): remove diff --git a/libgo/go/runtime/atomic.go b/libgo/go/runtime/atomic.go new file mode 100644 index 0000000..7e9d9b3 --- /dev/null +++ b/libgo/go/runtime/atomic.go @@ -0,0 +1,51 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !arm + +package runtime + +import "unsafe" + +//go:noescape +func xadd(ptr *uint32, delta int32) uint32 + +//go:noescape +func xadd64(ptr *uint64, delta int64) uint64 + +//go:noescape +func xchg(ptr *uint32, new uint32) uint32 + +//go:noescape +func xchg64(ptr *uint64, new uint64) uint64 + +//go:noescape +func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer + +//go:noescape +func xchguintptr(ptr *uintptr, new uintptr) uintptr + +//go:noescape +func atomicload(ptr *uint32) uint32 + +//go:noescape +func atomicload64(ptr *uint64) uint64 + +//go:noescape +func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer + +//go:noescape +func atomicor8(ptr *uint8, val uint8) + +//go:noescape +func cas64(ptr *uint64, old, new uint64) bool + +//go:noescape +func atomicstore(ptr *uint32, val uint32) + +//go:noescape +func atomicstore64(ptr *uint64, val uint64) + +//go:noescape +func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer) diff --git a/libgo/go/runtime/cgocall.go b/libgo/go/runtime/cgocall.go new file mode 100644 index 0000000..7fd9146 --- /dev/null +++ b/libgo/go/runtime/cgocall.go @@ -0,0 +1,279 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Cgo call and callback support. 
+// +// To call into the C function f from Go, the cgo-generated code calls +// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a +// gcc-compiled function written by cgo. +// +// runtime.cgocall (below) locks g to m, calls entersyscall +// so as not to block other goroutines or the garbage collector, +// and then calls runtime.asmcgocall(_cgo_Cfunc_f, frame). +// +// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack +// (assumed to be an operating system-allocated stack, so safe to run +// gcc-compiled code on) and calls _cgo_Cfunc_f(frame). +// +// _cgo_Cfunc_f invokes the actual C function f with arguments +// taken from the frame structure, records the results in the frame, +// and returns to runtime.asmcgocall. +// +// After it regains control, runtime.asmcgocall switches back to the +// original g (m->curg)'s stack and returns to runtime.cgocall. +// +// After it regains control, runtime.cgocall calls exitsyscall, which blocks +// until this m can run Go code without violating the $GOMAXPROCS limit, +// and then unlocks g from m. +// +// The above description skipped over the possibility of the gcc-compiled +// function f calling back into Go. If that happens, we continue down +// the rabbit hole during the execution of f. +// +// To make it possible for gcc-compiled C code to call a Go function p.GoF, +// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't +// know about packages). The gcc-compiled C function f calls GoF. +// +// GoF calls crosscall2(_cgoexp_GoF, frame, framesize). Crosscall2 +// (in cgo/gcc_$GOARCH.S, a gcc-compiled assembly file) is a two-argument +// adapter from the gcc function call ABI to the 6c function call ABI. +// It is called from gcc to call 6c functions. In this case it calls +// _cgoexp_GoF(frame, framesize), still running on m->g0's stack +// and outside the $GOMAXPROCS limit. Thus, this code cannot yet +// call arbitrary Go code directly and must be careful not to allocate +// memory or use up m->g0's stack. +// +// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize). +// (The reason for having _cgoexp_GoF instead of writing a crosscall3 +// to make this call directly is that _cgoexp_GoF, because it is compiled +// with 6c instead of gcc, can refer to dotted names like +// runtime.cgocallback and p.GoF.) +// +// runtime.cgocallback (in asm_$GOARCH.s) switches from m->g0's +// stack to the original g (m->curg)'s stack, on which it calls +// runtime.cgocallbackg(p.GoF, frame, framesize). +// As part of the stack switch, runtime.cgocallback saves the current +// SP as m->g0->sched.sp, so that any use of m->g0's stack during the +// execution of the callback will be done below the existing stack frames. +// Before overwriting m->g0->sched.sp, it pushes the old value on the +// m->g0 stack, so that it can be restored later. +// +// runtime.cgocallbackg (below) is now running on a real goroutine +// stack (not an m->g0 stack). First it calls runtime.exitsyscall, which will +// block until the $GOMAXPROCS limit allows running this goroutine. +// Once exitsyscall has returned, it is safe to do things like call the memory +// allocator or invoke the Go callback function p.GoF. runtime.cgocallbackg +// first defers a function to unwind m->g0.sched.sp, so that if p.GoF +// panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack +// and the m->curg stack will be unwound in lock step. +// Then it calls p.GoF. 
Finally it pops but does not execute the deferred +// function, calls runtime.entersyscall, and returns to runtime.cgocallback. +// +// After it regains control, runtime.cgocallback switches back to +// m->g0's stack (the pointer is still in m->g0.sched.sp), restores the old +// m->g0.sched.sp value from the stack, and returns to _cgoexp_GoF. +// +// _cgoexp_GoF immediately returns to crosscall2, which restores the +// callee-save registers for gcc and returns to GoF, which returns to f. + +package runtime + +import "unsafe" + +// Call from Go to C. +//go:nosplit +func cgocall(fn, arg unsafe.Pointer) { + cgocall_errno(fn, arg) +} + +//go:nosplit +func cgocall_errno(fn, arg unsafe.Pointer) int32 { + if !iscgo && GOOS != "solaris" && GOOS != "windows" { + gothrow("cgocall unavailable") + } + + if fn == nil { + gothrow("cgocall nil") + } + + if raceenabled { + racereleasemerge(unsafe.Pointer(&racecgosync)) + } + + // Create an extra M for callbacks on threads not created by Go on first cgo call. + if needextram == 1 && cas(&needextram, 1, 0) { + onM(newextram) + } + + /* + * Lock g to m to ensure we stay on the same stack if we do a + * cgo callback. Add entry to defer stack in case of panic. + */ + lockOSThread() + mp := getg().m + mp.ncgocall++ + mp.ncgo++ + defer endcgo(mp) + + /* + * Announce we are entering a system call + * so that the scheduler knows to create another + * M to run goroutines while we are in the + * foreign code. + * + * The call to asmcgocall is guaranteed not to + * split the stack and does not allocate memory, + * so it is safe to call while "in a system call", outside + * the $GOMAXPROCS accounting. + */ + entersyscall() + errno := asmcgocall_errno(fn, arg) + exitsyscall() + + return errno +} + +//go:nosplit +func endcgo(mp *m) { + mp.ncgo-- + if mp.ncgo == 0 { + // We are going back to Go and are not in a recursive + // call. Let the GC collect any memory allocated via + // _cgo_allocate that is no longer referenced. + mp.cgomal = nil + } + + if raceenabled { + raceacquire(unsafe.Pointer(&racecgosync)) + } + + unlockOSThread() // invalidates mp +} + +// Helper functions for cgo code. + +// Filled by schedinit from corresponding C variables, +// which are in turn filled in by dynamic linker when Cgo is available. +var cgoMalloc, cgoFree unsafe.Pointer + +func cmalloc(n uintptr) unsafe.Pointer { + var args struct { + n uint64 + ret unsafe.Pointer + } + args.n = uint64(n) + cgocall(cgoMalloc, unsafe.Pointer(&args)) + if args.ret == nil { + gothrow("C malloc failed") + } + return args.ret +} + +func cfree(p unsafe.Pointer) { + cgocall(cgoFree, p) +} + +// Call from C back to Go. +//go:nosplit +func cgocallbackg() { + gp := getg() + if gp != gp.m.curg { + println("runtime: bad g in cgocallback") + exit(2) + } + + // entersyscall saves the caller's SP to allow the GC to trace the Go + // stack. However, since we're returning to an earlier stack frame and + // need to pair with the entersyscall() call made by cgocall, we must + // save syscall* and let reentersyscall restore them. + savedsp := unsafe.Pointer(gp.syscallsp) + savedpc := gp.syscallpc + exitsyscall() // coming out of cgo call + cgocallbackg1() + // going back to cgo call + reentersyscall(savedpc, savedsp) +} + +func cgocallbackg1() { + gp := getg() + if gp.m.needextram { + gp.m.needextram = false + onM(newextram) + } + + // Add entry to defer stack in case of panic. 
+ restore := true + defer unwindm(&restore) + + if raceenabled { + raceacquire(unsafe.Pointer(&racecgosync)) + } + + type args struct { + fn *funcval + arg unsafe.Pointer + argsize uintptr + } + var cb *args + + // Location of callback arguments depends on stack frame layout + // and size of stack frame of cgocallback_gofunc. + sp := gp.m.g0.sched.sp + switch GOARCH { + default: + gothrow("cgocallbackg is unimplemented on arch") + case "arm": + // On arm, stack frame is two words and there's a saved LR between + // SP and the stack frame and between the stack frame and the arguments. + cb = (*args)(unsafe.Pointer(sp + 4*ptrSize)) + case "amd64": + // On amd64, stack frame is one word, plus caller PC. + cb = (*args)(unsafe.Pointer(sp + 2*ptrSize)) + case "386": + // On 386, stack frame is three words, plus caller PC. + cb = (*args)(unsafe.Pointer(sp + 4*ptrSize)) + } + + // Invoke callback. + reflectcall(unsafe.Pointer(cb.fn), unsafe.Pointer(cb.arg), uint32(cb.argsize), 0) + + if raceenabled { + racereleasemerge(unsafe.Pointer(&racecgosync)) + } + + // Do not unwind m->g0->sched.sp. + // Our caller, cgocallback, will do that. + restore = false +} + +func unwindm(restore *bool) { + if !*restore { + return + } + // Restore sp saved by cgocallback during + // unwind of g's stack (see comment at top of file). + mp := acquirem() + sched := &mp.g0.sched + switch GOARCH { + default: + gothrow("unwindm not implemented") + case "386", "amd64": + sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp)) + case "arm": + sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 4)) + } + releasem(mp) +} + +// called from assembly +func badcgocallback() { + gothrow("misaligned stack in cgocallback") +} + +// called from (incomplete) assembly +func cgounimpl() { + gothrow("cgo not implemented") +} + +var racecgosync uint64 // represents possible synchronization in C code diff --git a/libgo/go/runtime/cgocallback.go b/libgo/go/runtime/cgocallback.go new file mode 100644 index 0000000..2c89143 --- /dev/null +++ b/libgo/go/runtime/cgocallback.go @@ -0,0 +1,40 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// These functions are called from C code via cgo/callbacks.c. + +// Allocate memory. This allocates the requested number of bytes in +// memory controlled by the Go runtime. The allocated memory will be +// zeroed. You are responsible for ensuring that the Go garbage +// collector can see a pointer to the allocated memory for as long as +// it is valid, e.g., by storing a pointer in a local variable in your +// C function, or in memory allocated by the Go runtime. If the only +// pointers are in a C global variable or in memory allocated via +// malloc, then the Go garbage collector may collect the memory. +// +// TODO(rsc,iant): This memory is untyped. +// Either we need to add types or we need to stop using it. + +func _cgo_allocate_internal(len uintptr) unsafe.Pointer { + if len == 0 { + len = 1 + } + ret := unsafe.Pointer(&make([]unsafe.Pointer, (len+ptrSize-1)/ptrSize)[0]) + c := new(cgomal) + c.alloc = ret + gp := getg() + c.next = gp.m.cgomal + gp.m.cgomal = c + return ret +} + +// Panic. + +func _cgo_panic_internal(p *byte) { + panic(gostringnocopy(p)) +} diff --git a/libgo/go/runtime/chan.go b/libgo/go/runtime/chan.go new file mode 100644 index 0000000..0eb87df --- /dev/null +++ b/libgo/go/runtime/chan.go @@ -0,0 +1,655 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// This file contains the implementation of Go channels. + +import "unsafe" + +const ( + maxAlign = 8 + hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1)) + debugChan = false +) + +// TODO(khr): make hchan.buf an unsafe.Pointer, not a *uint8 + +func makechan(t *chantype, size int64) *hchan { + elem := t.elem + + // compiler checks this but be safe. + if elem.size >= 1<<16 { + gothrow("makechan: invalid channel element type") + } + if hchanSize%maxAlign != 0 || elem.align > maxAlign { + gothrow("makechan: bad alignment") + } + if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (maxmem-hchanSize)/uintptr(elem.size)) { + panic("makechan: size out of range") + } + + var c *hchan + if elem.kind&kindNoPointers != 0 || size == 0 { + // Allocate memory in one call. + // Hchan does not contain pointers interesting for GC in this case: + // buf points into the same allocation, elemtype is persistent. + // SudoG's are referenced from their owning thread so they can't be collected. + // TODO(dvyukov,rlh): Rethink when collector can move allocated objects. + c = (*hchan)(mallocgc(hchanSize+uintptr(size)*uintptr(elem.size), nil, flagNoScan)) + if size > 0 && elem.size != 0 { + c.buf = (*uint8)(add(unsafe.Pointer(c), hchanSize)) + } else { + c.buf = (*uint8)(unsafe.Pointer(c)) // race detector uses this location for synchronization + } + } else { + c = new(hchan) + c.buf = (*uint8)(newarray(elem, uintptr(size))) + } + c.elemsize = uint16(elem.size) + c.elemtype = elem + c.dataqsiz = uint(size) + + if debugChan { + print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n") + } + return c +} + +// chanbuf(c, i) is pointer to the i'th slot in the buffer. +func chanbuf(c *hchan, i uint) unsafe.Pointer { + return add(unsafe.Pointer(c.buf), uintptr(i)*uintptr(c.elemsize)) +} + +// entry point for c <- x from compiled code +//go:nosplit +func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) { + chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t))) +} + +/* + * generic single channel send/recv + * If block is not nil, + * then the protocol will not + * sleep but return if it could + * not complete. + * + * sleep can wake up with g.param == nil + * when a channel involved in the sleep has + * been closed. it is easiest to loop and re-run + * the operation; we'll see that it's now closed. + */ +func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool { + if raceenabled { + raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend)) + } + + if c == nil { + if !block { + return false + } + gopark(nil, nil, "chan send (nil chan)") + gothrow("unreachable") + } + + if debugChan { + print("chansend: chan=", c, "\n") + } + + if raceenabled { + racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend)) + } + + // Fast path: check for failed non-blocking operation without acquiring the lock. + // + // After observing that the channel is not closed, we observe that the channel is + // not ready for sending. Each of these observations is a single word-sized read + // (first c.closed and second c.recvq.first or c.qcount depending on kind of channel). 
+ // Because a closed channel cannot transition from 'ready for sending' to + // 'not ready for sending', even if the channel is closed between the two observations, + // they imply a moment between the two when the channel was both not yet closed + // and not ready for sending. We behave as if we observed the channel at that moment, + // and report that the send cannot proceed. + // + // It is okay if the reads are reordered here: if we observe that the channel is not + // ready for sending and then observe that it is not closed, that implies that the + // channel wasn't closed during the first observation. + if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) || + (c.dataqsiz > 0 && c.qcount == c.dataqsiz)) { + return false + } + + var t0 int64 + if blockprofilerate > 0 { + t0 = cputicks() + } + + lock(&c.lock) + if c.closed != 0 { + unlock(&c.lock) + panic("send on closed channel") + } + + if c.dataqsiz == 0 { // synchronous channel + sg := c.recvq.dequeue() + if sg != nil { // found a waiting receiver + if raceenabled { + racesync(c, sg) + } + unlock(&c.lock) + + recvg := sg.g + if sg.elem != nil { + memmove(unsafe.Pointer(sg.elem), ep, uintptr(c.elemsize)) + sg.elem = nil + } + recvg.param = unsafe.Pointer(sg) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(recvg) + return true + } + + if !block { + unlock(&c.lock) + return false + } + + // no receiver available: block on this channel. + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + mysg.elem = ep + mysg.waitlink = nil + gp.waiting = mysg + mysg.g = gp + mysg.selectdone = nil + gp.param = nil + c.sendq.enqueue(mysg) + goparkunlock(&c.lock, "chan send") + + // someone woke us up. + if mysg != gp.waiting { + gothrow("G waiting list is corrupted!") + } + gp.waiting = nil + if gp.param == nil { + if c.closed == 0 { + gothrow("chansend: spurious wakeup") + } + panic("send on closed channel") + } + gp.param = nil + if mysg.releasetime > 0 { + blockevent(int64(mysg.releasetime)-t0, 2) + } + releaseSudog(mysg) + return true + } + + // asynchronous channel + // wait for some space to write our data + var t1 int64 + for c.qcount >= c.dataqsiz { + if !block { + unlock(&c.lock) + return false + } + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + mysg.g = gp + mysg.elem = nil + mysg.selectdone = nil + c.sendq.enqueue(mysg) + goparkunlock(&c.lock, "chan send") + + // someone woke us up - try again + if mysg.releasetime > 0 { + t1 = mysg.releasetime + } + releaseSudog(mysg) + lock(&c.lock) + if c.closed != 0 { + unlock(&c.lock) + panic("send on closed channel") + } + } + + // write our data into the channel buffer + if raceenabled { + raceacquire(chanbuf(c, c.sendx)) + racerelease(chanbuf(c, c.sendx)) + } + memmove(chanbuf(c, c.sendx), ep, uintptr(c.elemsize)) + c.sendx++ + if c.sendx == c.dataqsiz { + c.sendx = 0 + } + c.qcount++ + + // wake up a waiting receiver + sg := c.recvq.dequeue() + if sg != nil { + recvg := sg.g + unlock(&c.lock) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(recvg) + } else { + unlock(&c.lock) + } + if t1 > 0 { + blockevent(t1-t0, 2) + } + return true +} + +func closechan(c *hchan) { + if c == nil { + panic("close of nil channel") + } + + lock(&c.lock) + if c.closed != 0 { + unlock(&c.lock) + panic("close of closed channel") + } + + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&c)) + racewritepc(unsafe.Pointer(c), callerpc, 
funcPC(closechan)) + racerelease(unsafe.Pointer(c)) + } + + c.closed = 1 + + // release all readers + for { + sg := c.recvq.dequeue() + if sg == nil { + break + } + gp := sg.g + sg.elem = nil + gp.param = nil + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } + + // release all writers + for { + sg := c.sendq.dequeue() + if sg == nil { + break + } + gp := sg.g + sg.elem = nil + gp.param = nil + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } + unlock(&c.lock) +} + +// entry points for <- c from compiled code +//go:nosplit +func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) { + chanrecv(t, c, elem, true) +} + +//go:nosplit +func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) { + _, received = chanrecv(t, c, elem, true) + return +} + +// chanrecv receives on channel c and writes the received data to ep. +// ep may be nil, in which case received data is ignored. +// If block == false and no elements are available, returns (false, false). +// Otherwise, if c is closed, zeros *ep and returns (true, false). +// Otherwise, fills in *ep with an element and returns (true, true). +func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) { + // raceenabled: don't need to check ep, as it is always on the stack. + + if debugChan { + print("chanrecv: chan=", c, "\n") + } + + if c == nil { + if !block { + return + } + gopark(nil, nil, "chan receive (nil chan)") + gothrow("unreachable") + } + + // Fast path: check for failed non-blocking operation without acquiring the lock. + // + // After observing that the channel is not ready for receiving, we observe that the + // channel is not closed. Each of these observations is a single word-sized read + // (first c.sendq.first or c.qcount, and second c.closed). + // Because a channel cannot be reopened, the later observation of the channel + // being not closed implies that it was also not closed at the moment of the + // first observation. We behave as if we observed the channel at that moment + // and report that the receive cannot proceed. + // + // The order of operations is important here: reversing the operations can lead to + // incorrect behavior when racing with a close. + if !block && (c.dataqsiz == 0 && c.sendq.first == nil || + c.dataqsiz > 0 && atomicloaduint(&c.qcount) == 0) && + atomicload(&c.closed) == 0 { + return + } + + var t0 int64 + if blockprofilerate > 0 { + t0 = cputicks() + } + + lock(&c.lock) + if c.dataqsiz == 0 { // synchronous channel + if c.closed != 0 { + return recvclosed(c, ep) + } + + sg := c.sendq.dequeue() + if sg != nil { + if raceenabled { + racesync(c, sg) + } + unlock(&c.lock) + + if ep != nil { + memmove(ep, sg.elem, uintptr(c.elemsize)) + } + sg.elem = nil + gp := sg.g + gp.param = unsafe.Pointer(sg) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + selected = true + received = true + return + } + + if !block { + unlock(&c.lock) + return + } + + // no sender available: block on this channel. 
+ gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + mysg.elem = ep + mysg.waitlink = nil + gp.waiting = mysg + mysg.g = gp + mysg.selectdone = nil + gp.param = nil + c.recvq.enqueue(mysg) + goparkunlock(&c.lock, "chan receive") + + // someone woke us up + if mysg != gp.waiting { + gothrow("G waiting list is corrupted!") + } + gp.waiting = nil + if mysg.releasetime > 0 { + blockevent(mysg.releasetime-t0, 2) + } + haveData := gp.param != nil + gp.param = nil + releaseSudog(mysg) + + if haveData { + // a sender sent us some data. It already wrote to ep. + selected = true + received = true + return + } + + lock(&c.lock) + if c.closed == 0 { + gothrow("chanrecv: spurious wakeup") + } + return recvclosed(c, ep) + } + + // asynchronous channel + // wait for some data to appear + var t1 int64 + for c.qcount <= 0 { + if c.closed != 0 { + selected, received = recvclosed(c, ep) + if t1 > 0 { + blockevent(t1-t0, 2) + } + return + } + + if !block { + unlock(&c.lock) + return + } + + // wait for someone to send an element + gp := getg() + mysg := acquireSudog() + mysg.releasetime = 0 + if t0 != 0 { + mysg.releasetime = -1 + } + mysg.elem = nil + mysg.g = gp + mysg.selectdone = nil + + c.recvq.enqueue(mysg) + goparkunlock(&c.lock, "chan receive") + + // someone woke us up - try again + if mysg.releasetime > 0 { + t1 = mysg.releasetime + } + releaseSudog(mysg) + lock(&c.lock) + } + + if raceenabled { + raceacquire(chanbuf(c, c.recvx)) + racerelease(chanbuf(c, c.recvx)) + } + if ep != nil { + memmove(ep, chanbuf(c, c.recvx), uintptr(c.elemsize)) + } + memclr(chanbuf(c, c.recvx), uintptr(c.elemsize)) + + c.recvx++ + if c.recvx == c.dataqsiz { + c.recvx = 0 + } + c.qcount-- + + // ping a sender now that there is space + sg := c.sendq.dequeue() + if sg != nil { + gp := sg.g + unlock(&c.lock) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } else { + unlock(&c.lock) + } + + if t1 > 0 { + blockevent(t1-t0, 2) + } + selected = true + received = true + return +} + +// recvclosed is a helper function for chanrecv. Handles cleanup +// when the receiver encounters a closed channel. +// Caller must hold c.lock, recvclosed will release the lock. +func recvclosed(c *hchan, ep unsafe.Pointer) (selected, recevied bool) { + if raceenabled { + raceacquire(unsafe.Pointer(c)) + } + unlock(&c.lock) + if ep != nil { + memclr(ep, uintptr(c.elemsize)) + } + return true, false +} + +// compiler implements +// +// select { +// case c <- v: +// ... foo +// default: +// ... bar +// } +// +// as +// +// if selectnbsend(c, v) { +// ... foo +// } else { +// ... bar +// } +// +func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) { + return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t))) +} + +// compiler implements +// +// select { +// case v = <-c: +// ... foo +// default: +// ... bar +// } +// +// as +// +// if selectnbrecv(&v, c) { +// ... foo +// } else { +// ... bar +// } +// +func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) { + selected, _ = chanrecv(t, c, elem, false) + return +} + +// compiler implements +// +// select { +// case v, ok = <-c: +// ... foo +// default: +// ... bar +// } +// +// as +// +// if c != nil && selectnbrecv2(&v, &ok, c) { +// ... foo +// } else { +// ... bar +// } +// +func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) { + // TODO(khr): just return 2 values from this function, now that it is in Go. 
+ selected, *received = chanrecv(t, c, elem, false) + return +} + +func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) { + return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t))) +} + +func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) { + return chanrecv(t, c, elem, !nb) +} + +func reflect_chanlen(c *hchan) int { + if c == nil { + return 0 + } + return int(c.qcount) +} + +func reflect_chancap(c *hchan) int { + if c == nil { + return 0 + } + return int(c.dataqsiz) +} + +func (q *waitq) enqueue(sgp *sudog) { + sgp.next = nil + if q.first == nil { + q.first = sgp + q.last = sgp + return + } + q.last.next = sgp + q.last = sgp +} + +func (q *waitq) dequeue() *sudog { + for { + sgp := q.first + if sgp == nil { + return nil + } + q.first = sgp.next + sgp.next = nil + if q.last == sgp { + q.last = nil + } + + // if sgp participates in a select and is already signaled, ignore it + if sgp.selectdone != nil { + // claim the right to signal + if *sgp.selectdone != 0 || !cas(sgp.selectdone, 0, 1) { + continue + } + } + + return sgp + } +} + +func racesync(c *hchan, sg *sudog) { + racerelease(chanbuf(c, 0)) + raceacquireg(sg.g, chanbuf(c, 0)) + racereleaseg(sg.g, chanbuf(c, 0)) + raceacquire(chanbuf(c, 0)) +} diff --git a/libgo/go/runtime/chan_test.go b/libgo/go/runtime/chan_test.go index ce4b396..e689cea 100644 --- a/libgo/go/runtime/chan_test.go +++ b/libgo/go/runtime/chan_test.go @@ -198,6 +198,26 @@ func TestChan(t *testing.T) { } } +func TestNonblockRecvRace(t *testing.T) { + n := 10000 + if testing.Short() { + n = 100 + } + for i := 0; i < n; i++ { + c := make(chan int, 1) + c <- 1 + go func() { + select { + case <-c: + default: + t.Fatal("chan is not ready") + } + }() + close(c) + <-c + } +} + func TestSelfSelect(t *testing.T) { // Ensure that send/recv on the same chan in select // does not crash nor deadlock. @@ -430,6 +450,67 @@ func TestMultiConsumer(t *testing.T) { } } +func TestShrinkStackDuringBlockedSend(t *testing.T) { + // make sure that channel operations still work when we are + // blocked on a channel send and we shrink the stack. + // NOTE: this test probably won't fail unless stack.c:StackDebug + // is set to >= 1. + const n = 10 + c := make(chan int) + done := make(chan struct{}) + + go func() { + for i := 0; i < n; i++ { + c <- i + // use lots of stack, briefly. + stackGrowthRecursive(20) + } + done <- struct{}{} + }() + + for i := 0; i < n; i++ { + x := <-c + if x != i { + t.Errorf("bad channel read: want %d, got %d", i, x) + } + // Waste some time so sender can finish using lots of stack + // and block in channel send. + time.Sleep(1 * time.Millisecond) + // trigger GC which will shrink the stack of the sender. + runtime.GC() + } + <-done +} + +func TestSelectDuplicateChannel(t *testing.T) { + // This test makes sure we can queue a G on + // the same channel multiple times. + c := make(chan int) + d := make(chan int) + e := make(chan int) + + // goroutine A + go func() { + select { + case <-c: + case <-c: + case <-d: + } + e <- 9 + }() + time.Sleep(time.Millisecond) // make sure goroutine A gets qeueued first on c + + // goroutine B + go func() { + <-c + }() + time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing + + d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq. + <-e // A tells us it's done + c <- 8 // wake up B. 
This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B) +} + func BenchmarkChanNonblocking(b *testing.B) { myc := make(chan int) b.RunParallel(func(pb *testing.PB) { @@ -458,7 +539,35 @@ func BenchmarkSelectUncontended(b *testing.B) { }) } -func BenchmarkSelectContended(b *testing.B) { +func BenchmarkSelectSyncContended(b *testing.B) { + myc1 := make(chan int) + myc2 := make(chan int) + myc3 := make(chan int) + done := make(chan int) + b.RunParallel(func(pb *testing.PB) { + go func() { + for { + select { + case myc1 <- 0: + case myc2 <- 0: + case myc3 <- 0: + case <-done: + return + } + } + }() + for pb.Next() { + select { + case <-myc1: + case <-myc2: + case <-myc3: + } + } + }) + close(done) +} + +func BenchmarkSelectAsyncContended(b *testing.B) { procs := runtime.GOMAXPROCS(0) myc1 := make(chan int, procs) myc2 := make(chan int, procs) @@ -476,11 +585,11 @@ func BenchmarkSelectContended(b *testing.B) { } func BenchmarkSelectNonblock(b *testing.B) { + myc1 := make(chan int) + myc2 := make(chan int) + myc3 := make(chan int, 1) + myc4 := make(chan int, 1) b.RunParallel(func(pb *testing.PB) { - myc1 := make(chan int) - myc2 := make(chan int) - myc3 := make(chan int, 1) - myc4 := make(chan int, 1) for pb.Next() { select { case <-myc1: diff --git a/libgo/go/runtime/complex.go b/libgo/go/runtime/complex.go new file mode 100644 index 0000000..ec50f89 --- /dev/null +++ b/libgo/go/runtime/complex.go @@ -0,0 +1,52 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +func complex128div(n complex128, d complex128) complex128 { + // Special cases as in C99. + ninf := real(n) == posinf || real(n) == neginf || + imag(n) == posinf || imag(n) == neginf + dinf := real(d) == posinf || real(d) == neginf || + imag(d) == posinf || imag(d) == neginf + + nnan := !ninf && (real(n) != real(n) || imag(n) != imag(n)) + dnan := !dinf && (real(d) != real(d) || imag(d) != imag(d)) + + switch { + case nnan || dnan: + return complex(nan, nan) + case ninf && !dinf: + return complex(posinf, posinf) + case !ninf && dinf: + return complex(0, 0) + case real(d) == 0 && imag(d) == 0: + if real(n) == 0 && imag(n) == 0 { + return complex(nan, nan) + } else { + return complex(posinf, posinf) + } + default: + // Standard complex arithmetic, factored to avoid unnecessary overflow. + a := real(d) + if a < 0 { + a = -a + } + b := imag(d) + if b < 0 { + b = -b + } + if a <= b { + ratio := real(d) / imag(d) + denom := real(d)*ratio + imag(d) + return complex((real(n)*ratio+imag(n))/denom, + (imag(n)*ratio-real(n))/denom) + } else { + ratio := imag(d) / real(d) + denom := imag(d)*ratio + real(d) + return complex((imag(n)*ratio+real(n))/denom, + (imag(n)-real(n)*ratio)/denom) + } + } +} diff --git a/libgo/go/runtime/cpuprof.go b/libgo/go/runtime/cpuprof.go new file mode 100644 index 0000000..8b1c1c6 --- /dev/null +++ b/libgo/go/runtime/cpuprof.go @@ -0,0 +1,425 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// CPU profiling. +// Based on algorithms and data structures used in +// http://code.google.com/p/google-perftools/. 
+// +// The main difference between this code and the google-perftools +// code is that this code is written to allow copying the profile data +// to an arbitrary io.Writer, while the google-perftools code always +// writes to an operating system file. +// +// The signal handler for the profiling clock tick adds a new stack trace +// to a hash table tracking counts for recent traces. Most clock ticks +// hit in the cache. In the event of a cache miss, an entry must be +// evicted from the hash table, copied to a log that will eventually be +// written as profile data. The google-perftools code flushed the +// log itself during the signal handler. This code cannot do that, because +// the io.Writer might block or need system calls or locks that are not +// safe to use from within the signal handler. Instead, we split the log +// into two halves and let the signal handler fill one half while a goroutine +// is writing out the other half. When the signal handler fills its half, it +// offers to swap with the goroutine. If the writer is not done with its half, +// we lose the stack trace for this clock tick (and record that loss). +// The goroutine interacts with the signal handler by calling getprofile() to +// get the next log piece to write, implicitly handing back the last log +// piece it obtained. +// +// The state of this dance between the signal handler and the goroutine +// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine +// is not using either log half and is waiting (or will soon be waiting) for +// a new piece by calling notesleep(&p->wait). If the signal handler +// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait) +// to wake the goroutine. The value indicates the number of entries in the +// log half being handed off. The goroutine leaves the non-zero value in +// place until it has finished processing the log half and then flips the number +// back to zero. Setting the high bit in handoff means that the profiling is over, +// and the goroutine is now in charge of flushing the data left in the hash table +// to the log and returning that data. +// +// The handoff field is manipulated using atomic operations. +// For the most part, the manipulation of handoff is orderly: if handoff == 0 +// then the signal handler owns it and can change it to non-zero. +// If handoff != 0 then the goroutine owns it and can change it to zero. +// If that were the end of the story then we would not need to manipulate +// handoff using atomic operations. The operations are needed, however, +// in order to let the log closer set the high bit to indicate "EOF" safely +// in the situation when normally the goroutine "owns" handoff. + +package runtime + +import "unsafe" + +const ( + numBuckets = 1 << 10 + logSize = 1 << 17 + assoc = 4 + maxCPUProfStack = 64 +) + +type cpuprofEntry struct { + count uintptr + depth uintptr + stack [maxCPUProfStack]uintptr +} + +type cpuProfile struct { + on bool // profiling is on + wait note // goroutine waits here + count uintptr // tick count + evicts uintptr // eviction count + lost uintptr // lost ticks that need to be logged + + // Active recent stack traces. + hash [numBuckets]struct { + entry [assoc]cpuprofEntry + } + + // Log of traces evicted from hash. + // Signal handler has filled log[toggle][:nlog]. + // Goroutine is writing log[1-toggle][:handoff]. + log [2][logSize / 2]uintptr + nlog uintptr + toggle int32 + handoff uint32 + + // Writer state. 
+ // Writer maintains its own toggle to avoid races + // looking at signal handler's toggle. + wtoggle uint32 + wholding bool // holding & need to release a log half + flushing bool // flushing hash table - profile is over + eodSent bool // special end-of-data record sent; => flushing +} + +var ( + cpuprofLock mutex + cpuprof *cpuProfile + + eod = [3]uintptr{0, 1, 0} +) + +func setcpuprofilerate_m() // proc.c + +func setcpuprofilerate(hz int32) { + g := getg() + g.m.scalararg[0] = uintptr(hz) + onM(setcpuprofilerate_m) +} + +// lostProfileData is a no-op function used in profiles +// to mark the number of profiling stack traces that were +// discarded due to slow data writers. +func lostProfileData() {} + +// SetCPUProfileRate sets the CPU profiling rate to hz samples per second. +// If hz <= 0, SetCPUProfileRate turns off profiling. +// If the profiler is on, the rate cannot be changed without first turning it off. +// +// Most clients should use the runtime/pprof package or +// the testing package's -test.cpuprofile flag instead of calling +// SetCPUProfileRate directly. +func SetCPUProfileRate(hz int) { + // Clamp hz to something reasonable. + if hz < 0 { + hz = 0 + } + if hz > 1000000 { + hz = 1000000 + } + + lock(&cpuprofLock) + if hz > 0 { + if cpuprof == nil { + cpuprof = (*cpuProfile)(sysAlloc(unsafe.Sizeof(cpuProfile{}), &memstats.other_sys)) + if cpuprof == nil { + print("runtime: cpu profiling cannot allocate memory\n") + unlock(&cpuprofLock) + return + } + } + if cpuprof.on || cpuprof.handoff != 0 { + print("runtime: cannot set cpu profile rate until previous profile has finished.\n") + unlock(&cpuprofLock) + return + } + + cpuprof.on = true + // pprof binary header format. + // http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117 + p := &cpuprof.log[0] + p[0] = 0 // count for header + p[1] = 3 // depth for header + p[2] = 0 // version number + p[3] = uintptr(1e6 / hz) // period (microseconds) + p[4] = 0 + cpuprof.nlog = 5 + cpuprof.toggle = 0 + cpuprof.wholding = false + cpuprof.wtoggle = 0 + cpuprof.flushing = false + cpuprof.eodSent = false + noteclear(&cpuprof.wait) + + setcpuprofilerate(int32(hz)) + } else if cpuprof != nil && cpuprof.on { + setcpuprofilerate(0) + cpuprof.on = false + + // Now add is not running anymore, and getprofile owns the entire log. + // Set the high bit in prof->handoff to tell getprofile. + for { + n := cpuprof.handoff + if n&0x80000000 != 0 { + print("runtime: setcpuprofile(off) twice\n") + } + if cas(&cpuprof.handoff, n, n|0x80000000) { + if n == 0 { + // we did the transition from 0 -> nonzero so we wake getprofile + notewakeup(&cpuprof.wait) + } + break + } + } + } + unlock(&cpuprofLock) +} + +func cpuproftick(pc *uintptr, n int32) { + if n > maxCPUProfStack { + n = maxCPUProfStack + } + s := (*[maxCPUProfStack]uintptr)(unsafe.Pointer(pc))[:n] + cpuprof.add(s) +} + +// add adds the stack trace to the profile. +// It is called from signal handlers and other limited environments +// and cannot allocate memory or acquire locks that might be +// held at the time of the signal, nor can it use substantial amounts +// of stack. It is allowed to call evict. +func (p *cpuProfile) add(pc []uintptr) { + // Compute hash. + h := uintptr(0) + for _, x := range pc { + h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1))) + h += x*31 + x*7 + x*3 + } + p.count++ + + // Add to entry count if already present in table. 
+ b := &p.hash[h%numBuckets] +Assoc: + for i := range b.entry { + e := &b.entry[i] + if e.depth != uintptr(len(pc)) { + continue + } + for j := range pc { + if e.stack[j] != pc[j] { + continue Assoc + } + } + e.count++ + return + } + + // Evict entry with smallest count. + var e *cpuprofEntry + for i := range b.entry { + if e == nil || b.entry[i].count < e.count { + e = &b.entry[i] + } + } + if e.count > 0 { + if !p.evict(e) { + // Could not evict entry. Record lost stack. + p.lost++ + return + } + p.evicts++ + } + + // Reuse the newly evicted entry. + e.depth = uintptr(len(pc)) + e.count = 1 + copy(e.stack[:], pc) +} + +// evict copies the given entry's data into the log, so that +// the entry can be reused. evict is called from add, which +// is called from the profiling signal handler, so it must not +// allocate memory or block. It is safe to call flushlog. +// evict returns true if the entry was copied to the log, +// false if there was no room available. +func (p *cpuProfile) evict(e *cpuprofEntry) bool { + d := e.depth + nslot := d + 2 + log := &p.log[p.toggle] + if p.nlog+nslot > uintptr(len(p.log[0])) { + if !p.flushlog() { + return false + } + log = &p.log[p.toggle] + } + + q := p.nlog + log[q] = e.count + q++ + log[q] = d + q++ + copy(log[q:], e.stack[:d]) + q += d + p.nlog = q + e.count = 0 + return true +} + +// flushlog tries to flush the current log and switch to the other one. +// flushlog is called from evict, called from add, called from the signal handler, +// so it cannot allocate memory or block. It can try to swap logs with +// the writing goroutine, as explained in the comment at the top of this file. +func (p *cpuProfile) flushlog() bool { + if !cas(&p.handoff, 0, uint32(p.nlog)) { + return false + } + notewakeup(&p.wait) + + p.toggle = 1 - p.toggle + log := &p.log[p.toggle] + q := uintptr(0) + if p.lost > 0 { + lostPC := funcPC(lostProfileData) + log[0] = p.lost + log[1] = 1 + log[2] = lostPC + q = 3 + p.lost = 0 + } + p.nlog = q + return true +} + +// getprofile blocks until the next block of profiling data is available +// and returns it as a []byte. It is called from the writing goroutine. +func (p *cpuProfile) getprofile() []byte { + if p == nil { + return nil + } + + if p.wholding { + // Release previous log to signal handling side. + // Loop because we are racing against SetCPUProfileRate(0). + for { + n := p.handoff + if n == 0 { + print("runtime: phase error during cpu profile handoff\n") + return nil + } + if n&0x80000000 != 0 { + p.wtoggle = 1 - p.wtoggle + p.wholding = false + p.flushing = true + goto Flush + } + if cas(&p.handoff, n, 0) { + break + } + } + p.wtoggle = 1 - p.wtoggle + p.wholding = false + } + + if p.flushing { + goto Flush + } + + if !p.on && p.handoff == 0 { + return nil + } + + // Wait for new log. + notetsleepg(&p.wait, -1) + noteclear(&p.wait) + + switch n := p.handoff; { + case n == 0: + print("runtime: phase error during cpu profile wait\n") + return nil + case n == 0x80000000: + p.flushing = true + goto Flush + default: + n &^= 0x80000000 + + // Return new log to caller. + p.wholding = true + + return uintptrBytes(p.log[p.wtoggle][:n]) + } + + // In flush mode. + // Add is no longer being called. We own the log. + // Also, p->handoff is non-zero, so flushlog will return false. + // Evict the hash table into the log and return it. +Flush: + for i := range p.hash { + b := &p.hash[i] + for j := range b.entry { + e := &b.entry[j] + if e.count > 0 && !p.evict(e) { + // Filled the log. Stop the loop and return what we've got. 
+ break Flush + } + } + } + + // Return pending log data. + if p.nlog > 0 { + // Note that we're using toggle now, not wtoggle, + // because we're working on the log directly. + n := p.nlog + p.nlog = 0 + return uintptrBytes(p.log[p.toggle][:n]) + } + + // Made it through the table without finding anything to log. + if !p.eodSent { + // We may not have space to append this to the partial log buf, + // so we always return a new slice for the end-of-data marker. + p.eodSent = true + return uintptrBytes(eod[:]) + } + + // Finally done. Clean up and return nil. + p.flushing = false + if !cas(&p.handoff, p.handoff, 0) { + print("runtime: profile flush racing with something\n") + } + return nil +} + +func uintptrBytes(p []uintptr) (ret []byte) { + pp := (*sliceStruct)(unsafe.Pointer(&p)) + rp := (*sliceStruct)(unsafe.Pointer(&ret)) + + rp.array = pp.array + rp.len = pp.len * int(unsafe.Sizeof(p[0])) + rp.cap = rp.len + + return +} + +// CPUProfile returns the next chunk of binary CPU profiling stack trace data, +// blocking until data is available. If profiling is turned off and all the profile +// data accumulated while it was on has been returned, CPUProfile returns nil. +// The caller must save the returned data before calling CPUProfile again. +// +// Most clients should use the runtime/pprof package or +// the testing package's -test.cpuprofile flag instead of calling +// CPUProfile directly. +func CPUProfile() []byte { + return cpuprof.getprofile() +} diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go index b534b89..972eedc 100644 --- a/libgo/go/runtime/crash_cgo_test.go +++ b/libgo/go/runtime/crash_cgo_test.go @@ -8,6 +8,7 @@ package runtime_test import ( "runtime" + "strings" "testing" ) @@ -19,7 +20,6 @@ func TestCgoSignalDeadlock(t *testing.T) { if testing.Short() && runtime.GOOS == "windows" { t.Skip("Skipping in short mode") // takes up to 64 seconds } - t.Skip("gccgo does not have a go command") got := executeTest(t, cgoSignalDeadlockSource, nil) want := "OK\n" if got != want { @@ -35,6 +35,21 @@ func TestCgoTraceback(t *testing.T) { } } +func TestCgoExternalThreadPanic(t *testing.T) { + if runtime.GOOS == "plan9" { + t.Skipf("no pthreads on %s", runtime.GOOS) + } + csrc := cgoExternalThreadPanicC + if runtime.GOOS == "windows" { + csrc = cgoExternalThreadPanicC_windows + } + got := executeTest(t, cgoExternalThreadPanicSource, nil, "main.c", csrc) + want := "panic: BOOM" + if !strings.Contains(got, want) { + t.Fatalf("want failure containing %q. 
output:\n%s\n", want, got) + } +} + const cgoSignalDeadlockSource = ` package main @@ -118,3 +133,64 @@ func main() { fmt.Printf("OK\n") } ` + +const cgoExternalThreadPanicSource = ` +package main + +// void start(void); +import "C" + +func main() { + C.start() + select {} +} + +//export gopanic +func gopanic() { + panic("BOOM") +} +` + +const cgoExternalThreadPanicC = ` +#include <stdlib.h> +#include <stdio.h> +#include <pthread.h> + +void gopanic(void); + +static void* +die(void* x) +{ + gopanic(); + return 0; +} + +void +start(void) +{ + pthread_t t; + if(pthread_create(&t, 0, die, 0) != 0) + printf("pthread_create failed\n"); +} +` + +const cgoExternalThreadPanicC_windows = ` +#include <stdlib.h> +#include <stdio.h> + +void gopanic(void); + +static void* +die(void* x) +{ + gopanic(); + return 0; +} + +void +start(void) +{ + if(_beginthreadex(0, 0, die, 0, 0, 0) != 0) + printf("_beginthreadex failed\n"); +} +` diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go index 39e0434..7e8a2e4 100644 --- a/libgo/go/runtime/crash_test.go +++ b/libgo/go/runtime/crash_test.go @@ -31,10 +31,11 @@ func testEnv(cmd *exec.Cmd) *exec.Cmd { return cmd } -func executeTest(t *testing.T, templ string, data interface{}) string { +func executeTest(t *testing.T, templ string, data interface{}, extra ...string) string { t.Skip("gccgo does not have a go command") - if runtime.GOOS == "nacl" { - t.Skip("skipping on nacl") + switch runtime.GOOS { + case "android", "nacl": + t.Skipf("skipping on %s", runtime.GOOS) } checkStaleRuntime(t) @@ -61,7 +62,20 @@ func executeTest(t *testing.T, templ string, data interface{}) string { t.Fatalf("failed to close file: %v", err) } - got, _ := testEnv(exec.Command("go", "run", src)).CombinedOutput() + for i := 0; i < len(extra); i += 2 { + if err := ioutil.WriteFile(filepath.Join(dir, extra[i]), []byte(extra[i+1]), 0666); err != nil { + t.Fatal(err) + } + } + + cmd := exec.Command("go", "build", "-o", "a.exe") + cmd.Dir = dir + out, err := testEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("building source: %v\n%s", err, out) + } + + got, _ := testEnv(exec.Command(filepath.Join(dir, "a.exe"))).CombinedOutput() return string(got) } @@ -159,6 +173,22 @@ func TestGoexitCrash(t *testing.T) { } } +func TestGoexitDefer(t *testing.T) { + c := make(chan struct{}) + go func() { + defer func() { + r := recover() + if r != nil { + t.Errorf("non-nil recover during Goexit") + } + c <- struct{}{} + }() + runtime.Goexit() + }() + // Note: if the defer fails to run, we will get a deadlock here + <-c +} + func TestGoNil(t *testing.T) { output := executeTest(t, goNilSource, nil) want := "go of nil func value" @@ -167,6 +197,22 @@ func TestGoNil(t *testing.T) { } } +func TestMainGoroutineId(t *testing.T) { + output := executeTest(t, mainGoroutineIdSource, nil) + want := "panic: test\n\ngoroutine 1 [running]:\n" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +func TestBreakpoint(t *testing.T) { + output := executeTest(t, breakpointSource, nil) + want := "runtime.Breakpoint()" + if !strings.Contains(output, want) { + t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want) + } +} + const crashSource = ` package main @@ -365,3 +411,106 @@ func main() { select{} } ` + +const mainGoroutineIdSource = ` +package main +func main() { + panic("test") +} +` + +const breakpointSource = ` +package main +import "runtime" +func main() { + runtime.Breakpoint() +} +` + +func TestGoexitInPanic(t *testing.T) 
{ + // see issue 8774: this code used to trigger an infinite recursion + output := executeTest(t, goexitInPanicSource, nil) + want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +const goexitInPanicSource = ` +package main +import "runtime" +func main() { + go func() { + defer func() { + runtime.Goexit() + }() + panic("hello") + }() + runtime.Goexit() +} +` + +func TestPanicAfterGoexit(t *testing.T) { + // an uncaught panic should still work after goexit + output := executeTest(t, panicAfterGoexitSource, nil) + want := "panic: hello" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +const panicAfterGoexitSource = ` +package main +import "runtime" +func main() { + defer func() { + panic("hello") + }() + runtime.Goexit() +} +` + +func TestRecoveredPanicAfterGoexit(t *testing.T) { + output := executeTest(t, recoveredPanicAfterGoexitSource, nil) + want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!" + if !strings.HasPrefix(output, want) { + t.Fatalf("output does not start with %q:\n%s", want, output) + } +} + +const recoveredPanicAfterGoexitSource = ` +package main +import "runtime" +func main() { + defer func() { + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + panic("hello") + }() + runtime.Goexit() +} +` + +func TestRecoverBeforePanicAfterGoexit(t *testing.T) { + // 1. defer a function that recovers + // 2. defer a function that panics + // 3. call goexit + // Goexit should run the #2 defer. Its panic + // should be caught by the #1 defer, and execution + // should resume in the caller. Like the Goexit + // never happened! + defer func() { + r := recover() + if r == nil { + panic("bad recover") + } + }() + defer func() { + panic("hello") + }() + runtime.Goexit() +} diff --git a/libgo/go/runtime/env_posix.go b/libgo/go/runtime/env_posix.go new file mode 100644 index 0000000..8b1dbb7 --- /dev/null +++ b/libgo/go/runtime/env_posix.go @@ -0,0 +1,58 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows + +package runtime + +import "unsafe" + +func environ() []string + +func getenv(s *byte) *byte { + val := gogetenv(gostringnocopy(s)) + if val == "" { + return nil + } + // Strings found in environment are NUL-terminated. + return &bytes(val)[0] +} + +func gogetenv(key string) string { + env := environ() + if env == nil { + gothrow("getenv before env init") + } + for _, s := range environ() { + if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key { + return s[len(key)+1:] + } + } + return "" +} + +//extern setenv +func _cgo_setenv(unsafe.Pointer, unsafe.Pointer, int32) + +//extern unsetenv +func _cgo_unsetenv(unsafe.Pointer) + +// Update the C environment if cgo is loaded. +// Called from syscall.Setenv. +func syscall_setenv_c(k string, v string) { + _cgo_setenv(cstring(k), cstring(v), 1) +} + +// Update the C environment if cgo is loaded. +// Called from syscall.unsetenv. 
+func syscall_unsetenv_c(k string) { + _cgo_unsetenv(cstring(k)) +} + +func cstring(s string) unsafe.Pointer { + p := make([]byte, len(s)+1) + sp := (*_string)(unsafe.Pointer(&s)) + memmove(unsafe.Pointer(&p[0]), unsafe.Pointer(sp.str), uintptr(len(s))) + return unsafe.Pointer(&p[0]) +} diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go index 5a1e9b8..fe9e839 100644 --- a/libgo/go/runtime/gc_test.go +++ b/libgo/go/runtime/gc_test.go @@ -10,6 +10,7 @@ import ( "runtime/debug" "testing" "time" + "unsafe" ) func TestGcSys(t *testing.T) { @@ -165,6 +166,37 @@ func TestGcLastTime(t *testing.T) { if t0 > last || last > t1 { t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1) } + pause := ms.PauseNs[(ms.NumGC+255)%256] + // Due to timer granularity, pause can actually be 0 on windows + // or on virtualized environments. + if pause == 0 { + t.Logf("last GC pause was 0") + } else if pause > 10e9 { + t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause) + } +} + +var hugeSink interface{} + +func TestHugeGCInfo(t *testing.T) { + // The test ensures that compiler can chew these huge types even on weakest machines. + // The types are not allocated at runtime. + if hugeSink != nil { + // 400MB on 32 bots, 4TB on 64-bits. + const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40 + hugeSink = new([n]*byte) + hugeSink = new([n]uintptr) + hugeSink = new(struct { + x float64 + y [n]*byte + z []string + }) + hugeSink = new(struct { + x float64 + y [n]uintptr + z []string + }) + } } func BenchmarkSetTypeNoPtr1(b *testing.B) { @@ -236,3 +268,27 @@ func BenchmarkAllocation(b *testing.B) { <-result } } + +func TestPrintGC(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode") + } + defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) + done := make(chan bool) + go func() { + for { + select { + case <-done: + return + default: + runtime.GC() + } + } + }() + for i := 0; i < 1e4; i++ { + func() { + defer print("") + }() + } + close(done) +} diff --git a/libgo/go/runtime/gcinfo_test.go b/libgo/go/runtime/gcinfo_test.go new file mode 100644 index 0000000..0044992 --- /dev/null +++ b/libgo/go/runtime/gcinfo_test.go @@ -0,0 +1,194 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +import ( + "bytes" + "runtime" + "testing" +) + +// TestGCInfo tests that various objects in heap, data and bss receive correct GC pointer type info. 
+func TestGCInfo(t *testing.T) { + t.Skip("skipping on gccgo for now") + verifyGCInfo(t, "bss ScalarPtr", &bssScalarPtr, nonStackInfo(infoScalarPtr)) + verifyGCInfo(t, "bss PtrScalar", &bssPtrScalar, nonStackInfo(infoPtrScalar)) + verifyGCInfo(t, "bss BigStruct", &bssBigStruct, nonStackInfo(infoBigStruct())) + verifyGCInfo(t, "bss string", &bssString, nonStackInfo(infoString)) + verifyGCInfo(t, "bss slice", &bssSlice, nonStackInfo(infoSlice)) + verifyGCInfo(t, "bss eface", &bssEface, nonStackInfo(infoEface)) + verifyGCInfo(t, "bss iface", &bssIface, nonStackInfo(infoIface)) + + verifyGCInfo(t, "data ScalarPtr", &dataScalarPtr, nonStackInfo(infoScalarPtr)) + verifyGCInfo(t, "data PtrScalar", &dataPtrScalar, nonStackInfo(infoPtrScalar)) + verifyGCInfo(t, "data BigStruct", &dataBigStruct, nonStackInfo(infoBigStruct())) + verifyGCInfo(t, "data string", &dataString, nonStackInfo(infoString)) + verifyGCInfo(t, "data slice", &dataSlice, nonStackInfo(infoSlice)) + verifyGCInfo(t, "data eface", &dataEface, nonStackInfo(infoEface)) + verifyGCInfo(t, "data iface", &dataIface, nonStackInfo(infoIface)) + + verifyGCInfo(t, "stack ScalarPtr", new(ScalarPtr), infoScalarPtr) + verifyGCInfo(t, "stack PtrScalar", new(PtrScalar), infoPtrScalar) + verifyGCInfo(t, "stack BigStruct", new(BigStruct), infoBigStruct()) + verifyGCInfo(t, "stack string", new(string), infoString) + verifyGCInfo(t, "stack slice", new([]string), infoSlice) + verifyGCInfo(t, "stack eface", new(interface{}), infoEface) + verifyGCInfo(t, "stack iface", new(Iface), infoIface) + + for i := 0; i < 10; i++ { + verifyGCInfo(t, "heap ScalarPtr", escape(new(ScalarPtr)), nonStackInfo(infoScalarPtr)) + verifyGCInfo(t, "heap PtrScalar", escape(new(PtrScalar)), nonStackInfo(infoPtrScalar)) + verifyGCInfo(t, "heap BigStruct", escape(new(BigStruct)), nonStackInfo(infoBigStruct())) + verifyGCInfo(t, "heap string", escape(new(string)), nonStackInfo(infoString)) + verifyGCInfo(t, "heap eface", escape(new(interface{})), nonStackInfo(infoEface)) + verifyGCInfo(t, "heap iface", escape(new(Iface)), nonStackInfo(infoIface)) + } + +} + +func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) { + mask := /* runtime.GCMask(p) */ []byte(nil) + if len(mask) > len(mask0) { + mask0 = append(mask0, BitsDead) + mask = mask[:len(mask0)] + } + if bytes.Compare(mask, mask0) != 0 { + t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask) + return + } +} + +func nonStackInfo(mask []byte) []byte { + // BitsDead is replaced with BitsScalar everywhere except stacks. 
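+// For example, the stack mask {BitsPointer, BitsDead} of a string becomes +// {BitsPointer, BitsScalar} for the heap, data and bss copies of the same type.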
+ mask1 := make([]byte, len(mask)) + mw := false + for i, v := range mask { + if !mw && v == BitsDead { + v = BitsScalar + } + mw = !mw && v == BitsMultiWord + mask1[i] = v + } + return mask1 +} + +var gcinfoSink interface{} + +func escape(p interface{}) interface{} { + gcinfoSink = p + return p +} + +const ( + BitsDead = iota + BitsScalar + BitsPointer + BitsMultiWord +) + +const ( + BitsString = iota // unused + BitsSlice // unused + BitsIface + BitsEface +) + +type ScalarPtr struct { + q int + w *int + e int + r *int + t int + y *int +} + +var infoScalarPtr = []byte{BitsScalar, BitsPointer, BitsScalar, BitsPointer, BitsScalar, BitsPointer} + +type PtrScalar struct { + q *int + w int + e *int + r int + t *int + y int +} + +var infoPtrScalar = []byte{BitsPointer, BitsScalar, BitsPointer, BitsScalar, BitsPointer, BitsScalar} + +type BigStruct struct { + q *int + w byte + e [17]byte + r []byte + t int + y uint16 + u uint64 + i string +} + +func infoBigStruct() []byte { + switch runtime.GOARCH { + case "386", "arm": + return []byte{ + BitsPointer, // q *int + BitsScalar, BitsScalar, BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte + BitsPointer, BitsDead, BitsDead, // r []byte + BitsScalar, BitsScalar, BitsScalar, BitsScalar, // t int; y uint16; u uint64 + BitsPointer, BitsDead, // i string + } + case "amd64": + return []byte{ + BitsPointer, // q *int + BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte + BitsPointer, BitsDead, BitsDead, // r []byte + BitsScalar, BitsScalar, BitsScalar, // t int; y uint16; u uint64 + BitsPointer, BitsDead, // i string + } + case "amd64p32": + return []byte{ + BitsPointer, // q *int + BitsScalar, BitsScalar, BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte + BitsPointer, BitsDead, BitsDead, // r []byte + BitsScalar, BitsScalar, BitsDead, BitsScalar, BitsScalar, // t int; y uint16; u uint64 + BitsPointer, BitsDead, // i string + } + default: + panic("unknown arch") + } +} + +type Iface interface { + f() +} + +type IfaceImpl int + +func (IfaceImpl) f() { +} + +var ( + // BSS + bssScalarPtr ScalarPtr + bssPtrScalar PtrScalar + bssBigStruct BigStruct + bssString string + bssSlice []string + bssEface interface{} + bssIface Iface + + // DATA + dataScalarPtr = ScalarPtr{q: 1} + dataPtrScalar = PtrScalar{w: 1} + dataBigStruct = BigStruct{w: 1} + dataString = "foo" + dataSlice = []string{"foo"} + dataEface interface{} = 42 + dataIface Iface = IfaceImpl(42) + + infoString = []byte{BitsPointer, BitsDead} + infoSlice = []byte{BitsPointer, BitsDead, BitsDead} + infoEface = []byte{BitsMultiWord, BitsEface} + infoIface = []byte{BitsMultiWord, BitsIface} +) diff --git a/libgo/go/runtime/hashmap.go b/libgo/go/runtime/hashmap.go new file mode 100644 index 0000000..b4e6244 --- /dev/null +++ b/libgo/go/runtime/hashmap.go @@ -0,0 +1,953 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// This file contains the implementation of Go's map type. +// +// A map is just a hash table. The data is arranged +// into an array of buckets. Each bucket contains up to +// 8 key/value pairs. The low-order bits of the hash are +// used to select a bucket. Each bucket contains a few +// high-order bits of each hash to distinguish the entries +// within a single bucket. +// +// If more than 8 keys hash to a bucket, we chain on +// extra buckets. +// +// When the hashtable grows, we allocate a new array +// of buckets twice as big. 
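+// (Concretely, with 2^B buckets a lookup computes the mask m := 1<<B - 1 and +// scans bucket hash&m, comparing the top 8 bits of the hash, the tophash, +// before doing any full key comparisons; see mapaccess1 below.)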
Buckets are incrementally +// copied from the old bucket array to the new bucket array. +// +// Map iterators walk through the array of buckets and +// return the keys in walk order (bucket #, then overflow +// chain order, then bucket index). To maintain iteration +// semantics, we never move keys within their bucket (if +// we did, keys might be returned 0 or 2 times). When +// growing the table, iterators remain iterating through the +// old table and must check the new table if the bucket +// they are iterating through has been moved ("evacuated") +// to the new table. + +// Picking loadFactor: too large and we have lots of overflow +// buckets, too small and we waste a lot of space. I wrote +// a simple program to check some stats for different loads: +// (64-bit, 8 byte keys and values) +// loadFactor %overflow bytes/entry hitprobe missprobe +// 4.00 2.13 20.77 3.00 4.00 +// 4.50 4.05 17.30 3.25 4.50 +// 5.00 6.85 14.77 3.50 5.00 +// 5.50 10.55 12.94 3.75 5.50 +// 6.00 15.27 11.67 4.00 6.00 +// 6.50 20.90 10.79 4.25 6.50 +// 7.00 27.14 10.15 4.50 7.00 +// 7.50 34.03 9.73 4.75 7.50 +// 8.00 41.10 9.40 5.00 8.00 +// +// %overflow = percentage of buckets which have an overflow bucket +// bytes/entry = overhead bytes used per key/value pair +// hitprobe = # of entries to check when looking up a present key +// missprobe = # of entries to check when looking up an absent key +// +// Keep in mind this data is for maximally loaded tables, i.e. just +// before the table grows. Typical tables will be somewhat less loaded. + +import ( + "unsafe" +) + +const ( + // Maximum number of key/value pairs a bucket can hold. + bucketCntBits = 3 + bucketCnt = 1 << bucketCntBits + + // Maximum average load of a bucket that triggers growth. + loadFactor = 6.5 + + // Maximum key or value size to keep inline (instead of mallocing per element). + // Must fit in a uint8. + // Fast versions cannot handle big values - the cutoff size for + // fast versions in ../../cmd/gc/walk.c must be at most this value. + maxKeySize = 128 + maxValueSize = 128 + + // data offset should be the size of the bmap struct, but needs to be + // aligned correctly. For amd64p32 this means 64-bit alignment + // even though pointers are 32 bit. + dataOffset = unsafe.Offsetof(struct { + b bmap + v int64 + }{}.v) + + // Possible tophash values. We reserve a few possibilities for special marks. + // Each bucket (including its overflow buckets, if any) will have either all or none of its + // entries in the evacuated* states (except during the evacuate() method, which only happens + // during map writes and thus no one else can observe the map during that time). + empty = 0 // cell is empty + evacuatedEmpty = 1 // cell is empty, bucket is evacuated. + evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table. + evacuatedY = 3 // same as above, but evacuated to second half of larger table. + minTopHash = 4 // minimum tophash for a normal filled cell. + + // flags + iterator = 1 // there may be an iterator using buckets + oldIterator = 2 // there may be an iterator using oldbuckets + + // sentinel bucket ID for iterator checks + noCheck = 1<<(8*ptrSize) - 1 + + // trigger a garbage collection at every alloc called from this code + checkgc = false +) + +// A header for a Go map. +type hmap struct { + // Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and + // ../reflect/type.go. Don't change this structure without also changing that code! + count int // # live cells == size of map. 
Must be first (used by len() builtin) + flags uint32 + hash0 uint32 // hash seed + B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items) + + buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0. + oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing + nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated) +} + +// A bucket for a Go map. +type bmap struct { + tophash [bucketCnt]uint8 + overflow *bmap + // Followed by bucketCnt keys and then bucketCnt values. + // NOTE: packing all the keys together and then all the values together makes the + // code a bit more complicated than alternating key/value/key/value/... but it allows + // us to eliminate padding which would be needed for, e.g., map[int64]int8. +} + +// A hash iteration structure. +// If you modify hiter, also change cmd/gc/reflect.c to indicate +// the layout of this structure. +type hiter struct { + key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c). + value unsafe.Pointer // Must be in second position (see cmd/gc/range.c). + t *maptype + h *hmap + buckets unsafe.Pointer // bucket ptr at hash_iter initialization time + bptr *bmap // current bucket + startBucket uintptr // bucket iteration started at + offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1) + wrapped bool // already wrapped around from end of bucket array to beginning + B uint8 + i uint8 + bucket uintptr + checkBucket uintptr +} + +func evacuated(b *bmap) bool { + h := b.tophash[0] + return h > empty && h < minTopHash +} + +func makemap(t *maptype, hint int64) *hmap { + if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) { + gothrow("bad hmap size") + } + + if hint < 0 || int64(int32(hint)) != hint { + panic("makemap: size out of range") + // TODO: make hint an int, then none of this nonsense + } + + if !ismapkey(t.key) { + gothrow("runtime.makemap: unsupported map key type") + } + + // check compiler's and reflect's math + if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) || + t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) { + gothrow("key size wrong") + } + if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) || + t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) { + gothrow("value size wrong") + } + + // invariants we depend on. We should probably check these at compile time + // somewhere, but for now we'll do it here. 
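+ // (Keys and values are stored as two packed arrays inside each bucket, so +// each type's alignment must divide dataOffset and its size must be a +// multiple of its alignment; that is what the checks below verify.)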
+ if t.key.align > bucketCnt { + gothrow("key align too big") + } + if t.elem.align > bucketCnt { + gothrow("value align too big") + } + if uintptr(t.key.size)%uintptr(t.key.align) != 0 { + gothrow("key size not a multiple of key align") + } + if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 { + gothrow("value size not a multiple of value align") + } + if bucketCnt < 8 { + gothrow("bucketsize too small for proper alignment") + } + if dataOffset%uintptr(t.key.align) != 0 { + gothrow("need padding in bucket (key)") + } + if dataOffset%uintptr(t.elem.align) != 0 { + gothrow("need padding in bucket (value)") + } + + // find size parameter which will hold the requested # of elements + B := uint8(0) + for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ { + } + + // allocate initial hash table + // if B == 0, the buckets field is allocated lazily later (in mapassign) + // If hint is large zeroing this memory could take a while. + var buckets unsafe.Pointer + if B != 0 { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + buckets = newarray(t.bucket, uintptr(1)<<B) + } + + // initialize Hmap + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + h := (*hmap)(newobject(t.hmap)) + h.count = 0 + h.B = B + h.flags = 0 + h.hash0 = fastrand1() + h.buckets = buckets + h.oldbuckets = nil + h.nevacuate = 0 + + return h +} + +// mapaccess1 returns a pointer to h[key]. Never returns nil, instead +// it will return a reference to the zero object for the value type if +// the key is not in the map. +// NOTE: The returned pointer may keep the whole map live, so don't +// hold onto it for very long. +func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + pc := funcPC(mapaccess1) + racereadpc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.key, key, callerpc, pc) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero) + } + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + if t.indirectkey { + k = *((*unsafe.Pointer)(k)) + } + if alg.equal(key, k, uintptr(t.key.size)) { + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + return v + } + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero) + } + } +} + +func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + pc := funcPC(mapaccess2) + racereadpc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.key, key, callerpc, pc) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero), false + } + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := 
(*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + if t.indirectkey { + k = *((*unsafe.Pointer)(k)) + } + if alg.equal(key, k, uintptr(t.key.size)) { + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + return v, true + } + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero), false + } + } +} + +// returns both key and value. Used by map iterator +func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) { + if h == nil || h.count == 0 { + return nil, nil + } + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + if t.indirectkey { + k = *((*unsafe.Pointer)(k)) + } + if alg.equal(key, k, uintptr(t.key.size)) { + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + return k, v + } + } + b = b.overflow + if b == nil { + return nil, nil + } + } +} + +func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) { + if h == nil { + panic("assignment to entry in nil map") + } + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&t)) + pc := funcPC(mapassign1) + racewritepc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.key, key, callerpc, pc) + raceReadObjectPC(t.elem, val, callerpc, pc) + } + + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + + if h.buckets == nil { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + h.buckets = newarray(t.bucket, 1) + } + +again: + bucket := hash & (uintptr(1)<<h.B - 1) + if h.oldbuckets != nil { + growWork(t, h, bucket) + } + b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + + var inserti *uint8 + var insertk unsafe.Pointer + var insertv unsafe.Pointer + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + if b.tophash[i] == empty && inserti == nil { + inserti = &b.tophash[i] + insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + } + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + if !alg.equal(key, k2, uintptr(t.key.size)) { + continue + } + // already have a mapping for key. Update it. 
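+ // (The key bytes are overwritten as well: an equal key need not be +// bit-identical, e.g. +0.0 vs -0.0 for floating-point keys.)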
+ memmove(k2, key, uintptr(t.key.size)) + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize)) + v2 := v + if t.indirectvalue { + v2 = *((*unsafe.Pointer)(v2)) + } + memmove(v2, val, uintptr(t.elem.size)) + return + } + if b.overflow == nil { + break + } + b = b.overflow + } + + // did not find mapping for key. Allocate new cell & add entry. + if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt { + hashGrow(t, h) + goto again // Growing the table invalidates everything, so try again + } + + if inserti == nil { + // all current buckets are full, allocate a new one. + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + newb := (*bmap)(newobject(t.bucket)) + b.overflow = newb + inserti = &newb.tophash[0] + insertk = add(unsafe.Pointer(newb), dataOffset) + insertv = add(insertk, bucketCnt*uintptr(t.keysize)) + } + + // store new key/value at insert position + if t.indirectkey { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + kmem := newobject(t.key) + *(*unsafe.Pointer)(insertk) = kmem + insertk = kmem + } + if t.indirectvalue { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + vmem := newobject(t.elem) + *(*unsafe.Pointer)(insertv) = vmem + insertv = vmem + } + memmove(insertk, key, uintptr(t.key.size)) + memmove(insertv, val, uintptr(t.elem.size)) + *inserti = top + h.count++ +} + +func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + pc := funcPC(mapdelete) + racewritepc(unsafe.Pointer(h), callerpc, pc) + raceReadObjectPC(t.key, key, callerpc, pc) + } + if h == nil || h.count == 0 { + return + } + alg := goalg(t.key.alg) + hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0)) + bucket := hash & (uintptr(1)<<h.B - 1) + if h.oldbuckets != nil { + growWork(t, h, bucket) + } + b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize))) + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + if b.tophash[i] != top { + continue + } + k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize)) + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + if !alg.equal(key, k2, uintptr(t.key.size)) { + continue + } + memclr(k, uintptr(t.keysize)) + v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize)) + memclr(v, uintptr(t.valuesize)) + b.tophash[i] = empty + h.count-- + return + } + b = b.overflow + if b == nil { + return + } + } +} + +func mapiterinit(t *maptype, h *hmap, it *hiter) { + // Clear pointer fields so garbage collector does not complain. 
+ it.key = nil + it.value = nil + it.t = nil + it.h = nil + it.buckets = nil + it.bptr = nil + + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit)) + } + + if h == nil || h.count == 0 { + it.key = nil + it.value = nil + return + } + + if unsafe.Sizeof(hiter{})/ptrSize != 10 { + gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c + } + it.t = t + it.h = h + + // grab snapshot of bucket state + it.B = h.B + it.buckets = h.buckets + + // decide where to start + r := uintptr(fastrand1()) + if h.B > 31-bucketCntBits { + r += uintptr(fastrand1()) << 31 + } + it.startBucket = r & (uintptr(1)<<h.B - 1) + it.offset = uint8(r >> h.B & (bucketCnt - 1)) + + // iterator state + it.bucket = it.startBucket + it.wrapped = false + it.bptr = nil + + // Remember we have an iterator. + // Can run concurrently with another hash_iter_init(). + for { + old := h.flags + if old == old|iterator|oldIterator { + break + } + if cas(&h.flags, old, old|iterator|oldIterator) { + break + } + } + + mapiternext(it) +} + +func mapiternext(it *hiter) { + h := it.h + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&it)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext)) + } + t := it.t + bucket := it.bucket + b := it.bptr + i := it.i + checkBucket := it.checkBucket + alg := goalg(t.key.alg) + +next: + if b == nil { + if bucket == it.startBucket && it.wrapped { + // end of iteration + it.key = nil + it.value = nil + return + } + if h.oldbuckets != nil && it.B == h.B { + // Iterator was started in the middle of a grow, and the grow isn't done yet. + // If the bucket we're looking at hasn't been filled in yet (i.e. the old + // bucket hasn't been evacuated) then we need to iterate through the old + // bucket and only return the ones that will be migrated to this bucket. + oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1) + b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) + if !evacuated(b) { + checkBucket = bucket + } else { + b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize))) + checkBucket = noCheck + } + } else { + b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize))) + checkBucket = noCheck + } + bucket++ + if bucket == uintptr(1)<<it.B { + bucket = 0 + it.wrapped = true + } + i = 0 + } + for ; i < bucketCnt; i++ { + offi := (i + it.offset) & (bucketCnt - 1) + k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize)) + v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize)) + if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty { + if checkBucket != noCheck { + // Special case: iterator was started during a grow and the + // grow is not done yet. We're working on a bucket whose + // oldbucket has not been evacuated yet. Or at least, it wasn't + // evacuated when we started the bucket. So we're iterating + // through the oldbucket, skipping any keys that will go + // to the other new bucket (each oldbucket expands to two + // buckets during a grow). + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + if alg.equal(k2, k2, uintptr(t.key.size)) { + // If the item in the oldbucket is not destined for + // the current new bucket in the iteration, skip it. + hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0)) + if hash&(uintptr(1)<<it.B-1) != checkBucket { + continue + } + } else { + // Hash isn't repeatable if k != k (NaNs). 
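+// (No NaN compares equal to itself, and the float hash mixes in random +// state for such keys, so rehashing the same NaN key gives a new bucket.)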
We need a + // repeatable and randomish choice of which direction + // to send NaNs during evacuation. We'll use the low + // bit of tophash to decide which way NaNs go. + // NOTE: this case is why we need two evacuate tophash + // values, evacuatedX and evacuatedY, that differ in + // their low bit. + if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) { + continue + } + } + } + if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY { + // this is the golden data, we can return it. + if t.indirectkey { + k = *((*unsafe.Pointer)(k)) + } + it.key = k + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + it.value = v + } else { + // The hash table has grown since the iterator was started. + // The golden data for this key is now somewhere else. + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + if alg.equal(k2, k2, uintptr(t.key.size)) { + // Check the current hash table for the data. + // This code handles the case where the key + // has been deleted, updated, or deleted and reinserted. + // NOTE: we need to regrab the key as it has potentially been + // updated to an equal() but not identical key (e.g. +0.0 vs -0.0). + rk, rv := mapaccessK(t, h, k2) + if rk == nil { + continue // key has been deleted + } + it.key = rk + it.value = rv + } else { + // if key!=key then the entry can't be deleted or + // updated, so we can just return it. That's lucky for + // us because when key!=key we can't look it up + // successfully in the current table. + it.key = k2 + if t.indirectvalue { + v = *((*unsafe.Pointer)(v)) + } + it.value = v + } + } + it.bucket = bucket + it.bptr = b + it.i = i + 1 + it.checkBucket = checkBucket + return + } + } + b = b.overflow + i = 0 + goto next +} + +func hashGrow(t *maptype, h *hmap) { + if h.oldbuckets != nil { + gothrow("evacuation not done in time") + } + oldbuckets := h.buckets + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1)) + flags := h.flags &^ (iterator | oldIterator) + if h.flags&iterator != 0 { + flags |= oldIterator + } + // commit the grow (atomic wrt gc) + h.B++ + h.flags = flags + h.oldbuckets = oldbuckets + h.buckets = newbuckets + h.nevacuate = 0 + + // the actual copying of the hash table data is done incrementally + // by growWork() and evacuate(). +} + +func growWork(t *maptype, h *hmap, bucket uintptr) { + noldbuckets := uintptr(1) << (h.B - 1) + + // make sure we evacuate the oldbucket corresponding + // to the bucket we're about to use + evacuate(t, h, bucket&(noldbuckets-1)) + + // evacuate one more oldbucket to make progress on growing + if h.oldbuckets != nil { + evacuate(t, h, h.nevacuate) + } +} + +func evacuate(t *maptype, h *hmap, oldbucket uintptr) { + b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) + newbit := uintptr(1) << (h.B - 1) + alg := goalg(t.key.alg) + if !evacuated(b) { + // TODO: reuse overflow buckets instead of using new ones, if there + // is no iterator using the old buckets. (If !oldIterator.) 
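+ +// Sketch of the split: x stays at the old index (the "X" half, for entries +// with hash&newbit == 0) and y lives newbit buckets later (the "Y" half, +// for entries with hash&newbit != 0).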
+ + x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize))) + y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize))) + xi := 0 + yi := 0 + xk := add(unsafe.Pointer(x), dataOffset) + yk := add(unsafe.Pointer(y), dataOffset) + xv := add(xk, bucketCnt*uintptr(t.keysize)) + yv := add(yk, bucketCnt*uintptr(t.keysize)) + for ; b != nil; b = b.overflow { + k := add(unsafe.Pointer(b), dataOffset) + v := add(k, bucketCnt*uintptr(t.keysize)) + for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) { + top := b.tophash[i] + if top == empty { + b.tophash[i] = evacuatedEmpty + continue + } + if top < minTopHash { + gothrow("bad map state") + } + k2 := k + if t.indirectkey { + k2 = *((*unsafe.Pointer)(k2)) + } + // Compute hash to make our evacuation decision (whether we need + // to send this key/value to bucket x or bucket y). + hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0)) + if h.flags&iterator != 0 { + if !alg.equal(k2, k2, uintptr(t.key.size)) { + // If key != key (NaNs), then the hash could be (and probably + // will be) entirely different from the old hash. Moreover, + // it isn't reproducible. Reproducibility is required in the + // presence of iterators, as our evacuation decision must + // match whatever decision the iterator made. + // Fortunately, we have the freedom to send these keys either + // way. Also, tophash is meaningless for these kinds of keys. + // We let the low bit of tophash drive the evacuation decision. + // We recompute a new random tophash for the next level so + // these keys will get evenly distributed across all buckets + // after multiple grows. + if (top & 1) != 0 { + hash |= newbit + } else { + hash &^= newbit + } + top = uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + } + } + if (hash & newbit) == 0 { + b.tophash[i] = evacuatedX + if xi == bucketCnt { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + newx := (*bmap)(newobject(t.bucket)) + x.overflow = newx + x = newx + xi = 0 + xk = add(unsafe.Pointer(x), dataOffset) + xv = add(xk, bucketCnt*uintptr(t.keysize)) + } + x.tophash[xi] = top + if t.indirectkey { + *(*unsafe.Pointer)(xk) = k2 // copy pointer + } else { + memmove(xk, k, uintptr(t.key.size)) // copy value + } + if t.indirectvalue { + *(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v) + } else { + memmove(xv, v, uintptr(t.elem.size)) + } + xi++ + xk = add(xk, uintptr(t.keysize)) + xv = add(xv, uintptr(t.valuesize)) + } else { + b.tophash[i] = evacuatedY + if yi == bucketCnt { + if checkgc { + memstats.next_gc = memstats.heap_alloc + } + newy := (*bmap)(newobject(t.bucket)) + y.overflow = newy + y = newy + yi = 0 + yk = add(unsafe.Pointer(y), dataOffset) + yv = add(yk, bucketCnt*uintptr(t.keysize)) + } + y.tophash[yi] = top + if t.indirectkey { + *(*unsafe.Pointer)(yk) = k2 + } else { + memmove(yk, k, uintptr(t.key.size)) + } + if t.indirectvalue { + *(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v) + } else { + memmove(yv, v, uintptr(t.elem.size)) + } + yi++ + yk = add(yk, uintptr(t.keysize)) + yv = add(yv, uintptr(t.valuesize)) + } + } + } + // Unlink the overflow buckets & clear key/value to help GC. 
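+ // (Only safe when no iterator can still be walking the old bucket; if +// oldIterator is set, the old keys and values must stay intact.)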
+ if h.flags&oldIterator == 0 { + b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))) + b.overflow = nil + memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset) + } + } + + // Advance evacuation mark + if oldbucket == h.nevacuate { + h.nevacuate = oldbucket + 1 + if oldbucket+1 == newbit { // newbit == # of oldbuckets + // Growing is all done. Free old main bucket array. + h.oldbuckets = nil + } + } +} + +func ismapkey(t *_type) bool { + return goalg(t.alg).hash != nil +} + +// Reflect stubs. Called from ../reflect/asm_*.s + +func reflect_makemap(t *maptype) *hmap { + return makemap(t, 0) +} + +func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { + val, ok := mapaccess2(t, h, key) + if !ok { + // reflect wants nil for a missing element + val = nil + } + return val +} + +func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) { + mapassign1(t, h, key, val) +} + +func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { + mapdelete(t, h, key) +} + +func reflect_mapiterinit(t *maptype, h *hmap) *hiter { + it := new(hiter) + mapiterinit(t, h, it) + return it +} + +func reflect_mapiternext(it *hiter) { + mapiternext(it) +} + +func reflect_mapiterkey(it *hiter) unsafe.Pointer { + return it.key +} + +func reflect_maplen(h *hmap) int { + if h == nil { + return 0 + } + if raceenabled { + callerpc := getcallerpc(unsafe.Pointer(&h)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen)) + } + return h.count +} + +func reflect_ismapkey(t *_type) bool { + return ismapkey(t) +} diff --git a/libgo/go/runtime/hashmap_fast.go b/libgo/go/runtime/hashmap_fast.go new file mode 100644 index 0000000..8e21e02 --- /dev/null +++ b/libgo/go/runtime/hashmap_fast.go @@ -0,0 +1,379 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero) + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))) + if k != key { + continue + } + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)) + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero) + } + } +} + +func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero), false + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. 
+ b = (*bmap)(h.buckets) + } else { + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))) + if k != key { + continue + } + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero), false + } + } +} + +func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero) + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))) + if k != key { + continue + } + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)) + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero) + } + } +} + +func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero), false + } + var b *bmap + if h.B == 0 { + // One-bucket table. No need to hash. + b = (*bmap)(h.buckets) + } else { + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))) + if k != key { + continue + } + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero), false + } + } +} + +func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero) + } + key := (*stringStruct)(unsafe.Pointer(&ky)) + if h.B == 0 { + // One-bucket table. 
+ b := (*bmap)(h.buckets) + if key.len < 32 { + // short key, doing lots of comparisons is ok + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)) + } + } + return unsafe.Pointer(t.elem.zero) + } + // long key, try not to do more comparisons than necessary + keymaybe := uintptr(bucketCnt) + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)) + } + // check first 4 bytes + // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of + // four 1-byte comparisons. + if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { + continue + } + // check last 4 bytes + if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { + continue + } + if keymaybe != bucketCnt { + // Two keys are potential matches. Use hash to distinguish them. + goto dohash + } + keymaybe = i + } + if keymaybe != bucketCnt { + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize)) + if memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)) + } + } + return unsafe.Pointer(t.elem.zero) + } +dohash: + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x != top { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)) + } + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero) + } + } +} + +func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { + if raceenabled && h != nil { + callerpc := getcallerpc(unsafe.Pointer(&t)) + racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr)) + } + if h == nil || h.count == 0 { + return unsafe.Pointer(t.elem.zero), false + } + key := (*stringStruct)(unsafe.Pointer(&ky)) + if h.B == 0 { + // One-bucket table. 
+ b := (*bmap)(h.buckets) + if key.len < 32 { + // short key, doing lots of comparisons is ok + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true + } + } + return unsafe.Pointer(t.elem.zero), false + } + // long key, try not to do more comparisons than necessary + keymaybe := uintptr(bucketCnt) + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x == empty { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true + } + // check first 4 bytes + if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { + continue + } + // check last 4 bytes + if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { + continue + } + if keymaybe != bucketCnt { + // Two keys are potential matches. Use hash to distinguish them. + goto dohash + } + keymaybe = i + } + if keymaybe != bucketCnt { + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize)) + if memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true + } + } + return unsafe.Pointer(t.elem.zero), false + } +dohash: + hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0)) + m := uintptr(1)<<h.B - 1 + b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize))) + if c := h.oldbuckets; c != nil { + oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize))) + if !evacuated(oldb) { + b = oldb + } + } + top := uint8(hash >> (ptrSize*8 - 8)) + if top < minTopHash { + top += minTopHash + } + for { + for i := uintptr(0); i < bucketCnt; i++ { + x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check + if x != top { + continue + } + k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize)) + if k.len != key.len { + continue + } + if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) { + return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true + } + } + b = b.overflow + if b == nil { + return unsafe.Pointer(t.elem.zero), false + } + } +} diff --git a/libgo/go/runtime/lock_futex.go b/libgo/go/runtime/lock_futex.go new file mode 100644 index 0000000..7259623 --- /dev/null +++ b/libgo/go/runtime/lock_futex.go @@ -0,0 +1,205 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd linux + +package runtime + +import "unsafe" + +// This implementation depends on OS-specific implementations of +// +// runtime·futexsleep(uint32 *addr, uint32 val, int64 ns) +// Atomically, +// if(*addr == val) sleep +// Might be woken up spuriously; that's allowed. +// Don't sleep longer than ns; ns < 0 means forever. +// +// runtime·futexwakeup(uint32 *addr, uint32 cnt) +// If any procs are sleeping on addr, wake up at most cnt. 
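+// +// A rough sketch of the resulting protocol (constants defined below): +// lock() xchg's mutex_locked into l.key and returns if the old value was +// mutex_unlocked; a contended locker stores mutex_sleeping and calls +// futexsleep; unlock() xchg's mutex_unlocked back and, if the old value +// was mutex_sleeping, futexwakeup's one waiter.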
+ +const ( + mutex_unlocked = 0 + mutex_locked = 1 + mutex_sleeping = 2 + + active_spin = 4 + active_spin_cnt = 30 + passive_spin = 1 +) + +// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping. +// mutex_sleeping means that there is presumably at least one sleeping thread. +// Note that there can be spinning threads during all states - they do not +// affect mutex's state. + +func futexsleep(addr *uint32, val uint32, ns int64) +func futexwakeup(addr *uint32, cnt uint32) + +// We use the uintptr mutex.key and note.key as a uint32. +func key32(p *uintptr) *uint32 { + return (*uint32)(unsafe.Pointer(p)) +} + +func lock(l *mutex) { + gp := getg() + + if gp.m.locks < 0 { + gothrow("runtime·lock: lock count") + } + gp.m.locks++ + + // Speculative grab for lock. + v := xchg(key32(&l.key), mutex_locked) + if v == mutex_unlocked { + return + } + + // wait is either MUTEX_LOCKED or MUTEX_SLEEPING + // depending on whether there is a thread sleeping + // on this mutex. If we ever change l->key from + // MUTEX_SLEEPING to some other value, we must be + // careful to change it back to MUTEX_SLEEPING before + // returning, to ensure that the sleeping thread gets + // its wakeup call. + wait := v + + // On uniprocessors, no point spinning. + // On multiprocessors, spin for ACTIVE_SPIN attempts. + spin := 0 + if ncpu > 1 { + spin = active_spin + } + for { + // Try for lock, spinning. + for i := 0; i < spin; i++ { + for l.key == mutex_unlocked { + if cas(key32(&l.key), mutex_unlocked, wait) { + return + } + } + procyield(active_spin_cnt) + } + + // Try for lock, rescheduling. + for i := 0; i < passive_spin; i++ { + for l.key == mutex_unlocked { + if cas(key32(&l.key), mutex_unlocked, wait) { + return + } + } + osyield() + } + + // Sleep. + v = xchg(key32(&l.key), mutex_sleeping) + if v == mutex_unlocked { + return + } + wait = mutex_sleeping + futexsleep(key32(&l.key), mutex_sleeping, -1) + } +} + +func unlock(l *mutex) { + v := xchg(key32(&l.key), mutex_unlocked) + if v == mutex_unlocked { + gothrow("unlock of unlocked lock") + } + if v == mutex_sleeping { + futexwakeup(key32(&l.key), 1) + } + + gp := getg() + gp.m.locks-- + if gp.m.locks < 0 { + gothrow("runtime·unlock: lock count") + } + if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack + gp.stackguard0 = stackPreempt + } +} + +// One-time notifications. 
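+// A note is a one-shot event: noteclear resets it, notewakeup fires it at +// most once (a second wakeup throws), and notesleep/notetsleep block until +// it has fired, notetsleep for at most ns nanoseconds.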
+func noteclear(n *note) { + n.key = 0 +} + +func notewakeup(n *note) { + old := xchg(key32(&n.key), 1) + if old != 0 { + print("notewakeup - double wakeup (", old, ")\n") + gothrow("notewakeup - double wakeup") + } + futexwakeup(key32(&n.key), 1) +} + +func notesleep(n *note) { + gp := getg() + if gp != gp.m.g0 { + gothrow("notesleep not on g0") + } + for atomicload(key32(&n.key)) == 0 { + gp.m.blocked = true + futexsleep(key32(&n.key), 0, -1) + gp.m.blocked = false + } +} + +//go:nosplit +func notetsleep_internal(n *note, ns int64) bool { + gp := getg() + + if ns < 0 { + for atomicload(key32(&n.key)) == 0 { + gp.m.blocked = true + futexsleep(key32(&n.key), 0, -1) + gp.m.blocked = false + } + return true + } + + if atomicload(key32(&n.key)) != 0 { + return true + } + + deadline := nanotime() + ns + for { + gp.m.blocked = true + futexsleep(key32(&n.key), 0, ns) + gp.m.blocked = false + if atomicload(key32(&n.key)) != 0 { + break + } + now := nanotime() + if now >= deadline { + break + } + ns = deadline - now + } + return atomicload(key32(&n.key)) != 0 +} + +func notetsleep(n *note, ns int64) bool { + gp := getg() + if gp != gp.m.g0 && gp.m.gcing == 0 { + gothrow("notetsleep not on g0") + } + + return notetsleep_internal(n, ns) +} + +// same as runtime·notetsleep, but called on user g (not g0) +// calls only nosplit functions between entersyscallblock/exitsyscall +func notetsleepg(n *note, ns int64) bool { + gp := getg() + if gp == gp.m.g0 { + gothrow("notetsleepg on g0") + } + + entersyscallblock() + ok := notetsleep_internal(n, ns) + exitsyscall() + return ok +} diff --git a/libgo/go/runtime/lock_sema.go b/libgo/go/runtime/lock_sema.go new file mode 100644 index 0000000..d136b82 --- /dev/null +++ b/libgo/go/runtime/lock_sema.go @@ -0,0 +1,270 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin nacl netbsd openbsd plan9 solaris windows + +package runtime + +import "unsafe" + +// This implementation depends on OS-specific implementations of +// +// uintptr runtime·semacreate(void) +// Create a semaphore, which will be assigned to m->waitsema. +// The zero value is treated as absence of any semaphore, +// so be sure to return a non-zero value. +// +// int32 runtime·semasleep(int64 ns) +// If ns < 0, acquire m->waitsema and return 0. +// If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds. +// Return 0 if the semaphore was acquired, -1 if interrupted or timed out. +// +// int32 runtime·semawakeup(M *mp) +// Wake up mp, which is or will soon be sleeping on mp->waitsema. +// +const ( + locked uintptr = 1 + + active_spin = 4 + active_spin_cnt = 30 + passive_spin = 1 +) + +func semacreate() uintptr +func semasleep(int64) int32 +func semawakeup(mp *m) + +func lock(l *mutex) { + gp := getg() + if gp.m.locks < 0 { + gothrow("runtime·lock: lock count") + } + gp.m.locks++ + + // Speculative grab for lock. + if casuintptr(&l.key, 0, locked) { + return + } + if gp.m.waitsema == 0 { + gp.m.waitsema = semacreate() + } + + // On uniprocessors, no point spinning. + // On multiprocessors, spin for ACTIVE_SPIN attempts. + spin := 0 + if ncpu > 1 { + spin = active_spin + } +Loop: + for i := 0; ; i++ { + v := atomicloaduintptr(&l.key) + if v&locked == 0 { + // Unlocked. Try to lock.
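+ // (v|locked preserves any queued waiters: the bits of l.key above the +// lock bit hold the head of the waiting-M list described below.)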
+ if casuintptr(&l.key, v, v|locked) { + return + } + i = 0 + } + if i < spin { + procyield(active_spin_cnt) + } else if i < spin+passive_spin { + osyield() + } else { + // Someone else has it. + // l->waitm points to a linked list of M's waiting + // for this lock, chained through m->nextwaitm. + // Queue this M. + for { + gp.m.nextwaitm = (*m)((unsafe.Pointer)(v &^ locked)) + if casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) { + break + } + v = atomicloaduintptr(&l.key) + if v&locked == 0 { + continue Loop + } + } + if v&locked != 0 { + // Queued. Wait. + semasleep(-1) + i = 0 + } + } + } +} + +func unlock(l *mutex) { + gp := getg() + var mp *m + for { + v := atomicloaduintptr(&l.key) + if v == locked { + if casuintptr(&l.key, locked, 0) { + break + } + } else { + // Other M's are waiting for the lock. + // Dequeue an M. + mp = (*m)((unsafe.Pointer)(v &^ locked)) + if casuintptr(&l.key, v, uintptr(unsafe.Pointer(mp.nextwaitm))) { + // Dequeued an M. Wake it. + semawakeup(mp) + break + } + } + } + gp.m.locks-- + if gp.m.locks < 0 { + gothrow("runtime·unlock: lock count") + } + if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack + gp.stackguard0 = stackPreempt + } +} + +// One-time notifications. +func noteclear(n *note) { + n.key = 0 +} + +func notewakeup(n *note) { + var v uintptr + for { + v = atomicloaduintptr(&n.key) + if casuintptr(&n.key, v, locked) { + break + } + } + + // Successfully set waitm to locked. + // What was it before? + switch { + case v == 0: + // Nothing was waiting. Done. + case v == locked: + // Two notewakeups! Not allowed. + gothrow("notewakeup - double wakeup") + default: + // Must be the waiting m. Wake it up. + semawakeup((*m)(unsafe.Pointer(v))) + } +} + +func notesleep(n *note) { + gp := getg() + if gp != gp.m.g0 { + gothrow("notesleep not on g0") + } + if gp.m.waitsema == 0 { + gp.m.waitsema = semacreate() + } + if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) { + // Must be locked (got wakeup). + if n.key != locked { + gothrow("notesleep - waitm out of sync") + } + return + } + // Queued. Sleep. + gp.m.blocked = true + semasleep(-1) + gp.m.blocked = false +} + +//go:nosplit +func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool { + // gp and deadline are logically local variables, but they are written + // as parameters so that the stack space they require is charged + // to the caller. + // This reduces the nosplit footprint of notetsleep_internal. + gp = getg() + + // Register for wakeup on n->waitm. + if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) { + // Must be locked (got wakeup). + if n.key != locked { + gothrow("notetsleep - waitm out of sync") + } + return true + } + if ns < 0 { + // Queued. Sleep. + gp.m.blocked = true + semasleep(-1) + gp.m.blocked = false + return true + } + + deadline = nanotime() + ns + for { + // Registered. Sleep. + gp.m.blocked = true + if semasleep(ns) >= 0 { + gp.m.blocked = false + // Acquired semaphore, semawakeup unregistered us. + // Done. + return true + } + gp.m.blocked = false + // Interrupted or timed out. Still registered. Semaphore not acquired. + ns = deadline - nanotime() + if ns <= 0 { + break + } + // Deadline hasn't arrived. Keep sleeping. + } + + // Deadline arrived. Still registered. Semaphore not acquired. + // Want to give up and return, but have to unregister first, + // so that any notewakeup racing with the return does not + // try to grant us the semaphore when we don't expect it. 
+ for { + v := atomicloaduintptr(&n.key) + switch v { + case uintptr(unsafe.Pointer(gp.m)): + // No wakeup yet; unregister if possible. + if casuintptr(&n.key, v, 0) { + return false + } + case locked: + // Wakeup happened so semaphore is available. + // Grab it to avoid getting out of sync. + gp.m.blocked = true + if semasleep(-1) < 0 { + gothrow("runtime: unable to acquire - semaphore out of sync") + } + gp.m.blocked = false + return true + default: + gothrow("runtime: unexpected waitm - semaphore out of sync") + } + } +} + +func notetsleep(n *note, ns int64) bool { + gp := getg() + if gp != gp.m.g0 && gp.m.gcing == 0 { + gothrow("notetsleep not on g0") + } + if gp.m.waitsema == 0 { + gp.m.waitsema = semacreate() + } + return notetsleep_internal(n, ns, nil, 0) +} + +// same as runtime·notetsleep, but called on user g (not g0) +// calls only nosplit functions between entersyscallblock/exitsyscall +func notetsleepg(n *note, ns int64) bool { + gp := getg() + if gp == gp.m.g0 { + gothrow("notetsleepg on g0") + } + if gp.m.waitsema == 0 { + gp.m.waitsema = semacreate() + } + entersyscallblock() + ok := notetsleep_internal(n, ns, nil, 0) + exitsyscall() + return ok +} diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go new file mode 100644 index 0000000..1170449 --- /dev/null +++ b/libgo/go/runtime/malloc.go @@ -0,0 +1,837 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +const ( + debugMalloc = false + + flagNoScan = _FlagNoScan + flagNoZero = _FlagNoZero + + maxTinySize = _TinySize + tinySizeClass = _TinySizeClass + maxSmallSize = _MaxSmallSize + + pageShift = _PageShift + pageSize = _PageSize + pageMask = _PageMask + + bitsPerPointer = _BitsPerPointer + bitsMask = _BitsMask + pointersPerByte = _PointersPerByte + maxGCMask = _MaxGCMask + bitsDead = _BitsDead + bitsPointer = _BitsPointer + + mSpanInUse = _MSpanInUse + + concurrentSweep = _ConcurrentSweep != 0 +) + +// Page number (address>>pageShift) +type pageID uintptr + +// base address for all 0-byte allocations +var zerobase uintptr + +// Allocate an object of size bytes. +// Small objects are allocated from the per-P cache's free lists. +// Large objects (> 32 kB) are allocated straight from the heap. +func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer { + if size == 0 { + return unsafe.Pointer(&zerobase) + } + size0 := size + + if flags&flagNoScan == 0 && typ == nil { + gothrow("malloc missing type") + } + + // This function must be atomic wrt GC, but for performance reasons + // we don't acquirem/releasem on fast path. The code below does not have + // split stack checks, so it can't be preempted by GC. + // Functions like roundup/add are inlined. And onM/racemalloc are nosplit. + // If debugMalloc = true, these assumptions are checked below. + if debugMalloc { + mp := acquirem() + if mp.mallocing != 0 { + gothrow("malloc deadlock") + } + mp.mallocing = 1 + if mp.curg != nil { + mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad + } + } + + c := gomcache() + var s *mspan + var x unsafe.Pointer + if size <= maxSmallSize { + if flags&flagNoScan != 0 && size < maxTinySize { + // Tiny allocator. + // + // Tiny allocator combines several tiny allocation requests + // into a single memory block. The resulting memory block + // is freed when all subobjects are unreachable. 
The subobjects + // must be FlagNoScan (don't have pointers), this ensures that + // the amount of potentially wasted memory is bounded. + // + // Size of the memory block used for combining (maxTinySize) is tunable. + // Current setting is 16 bytes, which relates to 2x worst case memory + // wastage (when all but one subobjects are unreachable). + // 8 bytes would result in no wastage at all, but provides less + // opportunities for combining. + // 32 bytes provides more opportunities for combining, + // but can lead to 4x worst case wastage. + // The best case winning is 8x regardless of block size. + // + // Objects obtained from tiny allocator must not be freed explicitly. + // So when an object will be freed explicitly, we ensure that + // its size >= maxTinySize. + // + // SetFinalizer has a special case for objects potentially coming + // from tiny allocator, it such case it allows to set finalizers + // for an inner byte of a memory block. + // + // The main targets of tiny allocator are small strings and + // standalone escaping variables. On a json benchmark + // the allocator reduces number of allocations by ~12% and + // reduces heap size by ~20%. + tinysize := uintptr(c.tinysize) + if size <= tinysize { + tiny := unsafe.Pointer(c.tiny) + // Align tiny pointer for required (conservative) alignment. + if size&7 == 0 { + tiny = roundup(tiny, 8) + } else if size&3 == 0 { + tiny = roundup(tiny, 4) + } else if size&1 == 0 { + tiny = roundup(tiny, 2) + } + size1 := size + (uintptr(tiny) - uintptr(unsafe.Pointer(c.tiny))) + if size1 <= tinysize { + // The object fits into existing tiny block. + x = tiny + c.tiny = (*byte)(add(x, size)) + c.tinysize -= uintptr(size1) + c.local_tinyallocs++ + if debugMalloc { + mp := acquirem() + if mp.mallocing == 0 { + gothrow("bad malloc") + } + mp.mallocing = 0 + if mp.curg != nil { + mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard + } + // Note: one releasem for the acquirem just above. + // The other for the acquirem at start of malloc. + releasem(mp) + releasem(mp) + } + return x + } + } + // Allocate a new maxTinySize block. + s = c.alloc[tinySizeClass] + v := s.freelist + if v == nil { + mp := acquirem() + mp.scalararg[0] = tinySizeClass + onM(mcacheRefill_m) + releasem(mp) + s = c.alloc[tinySizeClass] + v = s.freelist + } + s.freelist = v.next + s.ref++ + //TODO: prefetch v.next + x = unsafe.Pointer(v) + (*[2]uint64)(x)[0] = 0 + (*[2]uint64)(x)[1] = 0 + // See if we need to replace the existing tiny block with the new one + // based on amount of remaining free space. 
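+ // Illustrative numbers: if the old block has only tinysize = 3
+ // bytes left while a fresh 16-byte block would have
+ // maxTinySize-size = 8 bytes left after this allocation, keeping
+ // the fresh block (8 > 3) preserves more room for future tiny
+ // requests.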
+ if maxTinySize-size > tinysize { + c.tiny = (*byte)(add(x, size)) + c.tinysize = uintptr(maxTinySize - size) + } + size = maxTinySize + } else { + var sizeclass int8 + if size <= 1024-8 { + sizeclass = size_to_class8[(size+7)>>3] + } else { + sizeclass = size_to_class128[(size-1024+127)>>7] + } + size = uintptr(class_to_size[sizeclass]) + s = c.alloc[sizeclass] + v := s.freelist + if v == nil { + mp := acquirem() + mp.scalararg[0] = uintptr(sizeclass) + onM(mcacheRefill_m) + releasem(mp) + s = c.alloc[sizeclass] + v = s.freelist + } + s.freelist = v.next + s.ref++ + //TODO: prefetch + x = unsafe.Pointer(v) + if flags&flagNoZero == 0 { + v.next = nil + if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 { + memclr(unsafe.Pointer(v), size) + } + } + } + c.local_cachealloc += intptr(size) + } else { + mp := acquirem() + mp.scalararg[0] = uintptr(size) + mp.scalararg[1] = uintptr(flags) + onM(largeAlloc_m) + s = (*mspan)(mp.ptrarg[0]) + mp.ptrarg[0] = nil + releasem(mp) + x = unsafe.Pointer(uintptr(s.start << pageShift)) + size = uintptr(s.elemsize) + } + + if flags&flagNoScan != 0 { + // All objects are pre-marked as noscan. + goto marked + } + + // If allocating a defer+arg block, now that we've picked a malloc size + // large enough to hold everything, cut the "asked for" size down to + // just the defer header, so that the GC bitmap will record the arg block + // as containing nothing at all (as if it were unused space at the end of + // a malloc block caused by size rounding). + // The defer arg areas are scanned as part of scanstack. + if typ == deferType { + size0 = unsafe.Sizeof(_defer{}) + } + + // From here till marked label marking the object as allocated + // and storing type info in the GC bitmap. + { + arena_start := uintptr(unsafe.Pointer(mheap_.arena_start)) + off := (uintptr(x) - arena_start) / ptrSize + xbits := (*uint8)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1)) + shift := (off % wordsPerBitmapByte) * gcBits + if debugMalloc && ((*xbits>>shift)&(bitMask|bitPtrMask)) != bitBoundary { + println("runtime: bits =", (*xbits>>shift)&(bitMask|bitPtrMask)) + gothrow("bad bits in markallocated") + } + + var ti, te uintptr + var ptrmask *uint8 + if size == ptrSize { + // It's one word and it has pointers, it must be a pointer. + *xbits |= (bitsPointer << 2) << shift + goto marked + } + if typ.kind&kindGCProg != 0 { + nptr := (uintptr(typ.size) + ptrSize - 1) / ptrSize + masksize := nptr + if masksize%2 != 0 { + masksize *= 2 // repeated + } + masksize = masksize * pointersPerByte / 8 // 4 bits per word + masksize++ // unroll flag in the beginning + if masksize > maxGCMask && typ.gc[1] != 0 { + // If the mask is too large, unroll the program directly + // into the GC bitmap. It's 7 times slower than copying + // from the pre-unrolled mask, but saves 1/16 of type size + // memory for the mask. + mp := acquirem() + mp.ptrarg[0] = x + mp.ptrarg[1] = unsafe.Pointer(typ) + mp.scalararg[0] = uintptr(size) + mp.scalararg[1] = uintptr(size0) + onM(unrollgcproginplace_m) + releasem(mp) + goto marked + } + ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0]))) + // Check whether the program is already unrolled. 
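+ // The first byte at ptrmask is the unroll flag reserved above:
+ // while it is still zero the GC program has not been expanded
+ // into the mask, so the atomic check below triggers
+ // unrollgcprog_m only in that case.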
+ if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 { + mp := acquirem() + mp.ptrarg[0] = unsafe.Pointer(typ) + onM(unrollgcprog_m) + releasem(mp) + } + ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte + } else { + ptrmask = (*uint8)(unsafe.Pointer(typ.gc[0])) // pointer to unrolled mask + } + if size == 2*ptrSize { + *xbits = *ptrmask | bitBoundary + goto marked + } + te = uintptr(typ.size) / ptrSize + // If the type occupies odd number of words, its mask is repeated. + if te%2 == 0 { + te /= 2 + } + // Copy pointer bitmask into the bitmap. + for i := uintptr(0); i < size0; i += 2 * ptrSize { + v := *(*uint8)(add(unsafe.Pointer(ptrmask), ti)) + ti++ + if ti == te { + ti = 0 + } + if i == 0 { + v |= bitBoundary + } + if i+ptrSize == size0 { + v &^= uint8(bitPtrMask << 4) + } + + *xbits = v + xbits = (*byte)(add(unsafe.Pointer(xbits), ^uintptr(0))) + } + if size0%(2*ptrSize) == 0 && size0 < size { + // Mark the word after last object's word as bitsDead. + *xbits = bitsDead << 2 + } + } +marked: + if raceenabled { + racemalloc(x, size) + } + + if debugMalloc { + mp := acquirem() + if mp.mallocing == 0 { + gothrow("bad malloc") + } + mp.mallocing = 0 + if mp.curg != nil { + mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard + } + // Note: one releasem for the acquirem just above. + // The other for the acquirem at start of malloc. + releasem(mp) + releasem(mp) + } + + if debug.allocfreetrace != 0 { + tracealloc(x, size, typ) + } + + if rate := MemProfileRate; rate > 0 { + if size < uintptr(rate) && int32(size) < c.next_sample { + c.next_sample -= int32(size) + } else { + mp := acquirem() + profilealloc(mp, x, size) + releasem(mp) + } + } + + if memstats.heap_alloc >= memstats.next_gc { + gogc(0) + } + + return x +} + +// implementation of new builtin +func newobject(typ *_type) unsafe.Pointer { + flags := uint32(0) + if typ.kind&kindNoPointers != 0 { + flags |= flagNoScan + } + return mallocgc(uintptr(typ.size), typ, flags) +} + +// implementation of make builtin for slices +func newarray(typ *_type, n uintptr) unsafe.Pointer { + flags := uint32(0) + if typ.kind&kindNoPointers != 0 { + flags |= flagNoScan + } + if int(n) < 0 || (typ.size > 0 && n > maxmem/uintptr(typ.size)) { + panic("runtime: allocation size out of range") + } + return mallocgc(uintptr(typ.size)*n, typ, flags) +} + +// rawmem returns a chunk of pointerless memory. It is +// not zeroed. +func rawmem(size uintptr) unsafe.Pointer { + return mallocgc(size, nil, flagNoScan|flagNoZero) +} + +// round size up to next size class +func goroundupsize(size uintptr) uintptr { + if size < maxSmallSize { + if size <= 1024-8 { + return uintptr(class_to_size[size_to_class8[(size+7)>>3]]) + } + return uintptr(class_to_size[size_to_class128[(size-1024+127)>>7]]) + } + if size+pageSize < size { + return size + } + return (size + pageSize - 1) &^ pageMask +} + +func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { + c := mp.mcache + rate := MemProfileRate + if size < uintptr(rate) { + // pick next profile time + // If you change this, also change allocmcache. + if rate > 0x3fffffff { // make 2*rate not overflow + rate = 0x3fffffff + } + next := int32(fastrand1()) % (2 * int32(rate)) + // Subtract the "remainder" of the current allocation. + // Otherwise objects that are close in size to sampling rate + // will be under-sampled, because we consistently discard this remainder. 
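+ // Illustrative arithmetic: with the default rate of 512 kB, next
+ // is drawn roughly uniformly from [0, 1 MB), giving one sample
+ // per 512 kB allocated on average. The subtraction below carries
+ // the bytes by which this allocation overshot next_sample into
+ // the next interval instead of discarding them.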
+ next -= (int32(size) - c.next_sample) + if next < 0 { + next = 0 + } + c.next_sample = next + } + + mProf_Malloc(x, size) +} + +// force = 1 - do GC regardless of current heap usage +// force = 2 - go GC and eager sweep +func gogc(force int32) { + // The gc is turned off (via enablegc) until the bootstrap has completed. + // Also, malloc gets called in the guts of a number of libraries that might be + // holding locks. To avoid deadlocks during stoptheworld, don't bother + // trying to run gc while holding a lock. The next mallocgc without a lock + // will do the gc instead. + mp := acquirem() + if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 { + releasem(mp) + return + } + releasem(mp) + mp = nil + + semacquire(&worldsema, false) + + if force == 0 && memstats.heap_alloc < memstats.next_gc { + // typically threads which lost the race to grab + // worldsema exit here when gc is done. + semrelease(&worldsema) + return + } + + // Ok, we're doing it! Stop everybody else + startTime := nanotime() + mp = acquirem() + mp.gcing = 1 + releasem(mp) + onM(stoptheworld) + if mp != acquirem() { + gothrow("gogc: rescheduled") + } + + clearpools() + + // Run gc on the g0 stack. We do this so that the g stack + // we're currently running on will no longer change. Cuts + // the root set down a bit (g0 stacks are not scanned, and + // we don't need to scan gc's internal state). We also + // need to switch to g0 so we can shrink the stack. + n := 1 + if debug.gctrace > 1 { + n = 2 + } + for i := 0; i < n; i++ { + if i > 0 { + startTime = nanotime() + } + // switch to g0, call gc, then switch back + mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits + mp.scalararg[1] = uintptr(startTime >> 32) // high 32 bits + if force >= 2 { + mp.scalararg[2] = 1 // eagersweep + } else { + mp.scalararg[2] = 0 + } + onM(gc_m) + } + + // all done + mp.gcing = 0 + semrelease(&worldsema) + onM(starttheworld) + releasem(mp) + mp = nil + + // now that gc is done, kick off finalizer thread if needed + if !concurrentSweep { + // give the queued finalizers, if any, a chance to run + Gosched() + } +} + +// GC runs a garbage collection. +func GC() { + gogc(2) +} + +// linker-provided +var noptrdata struct{} +var enoptrdata struct{} +var noptrbss struct{} +var enoptrbss struct{} + +// SetFinalizer sets the finalizer associated with x to f. +// When the garbage collector finds an unreachable block +// with an associated finalizer, it clears the association and runs +// f(x) in a separate goroutine. This makes x reachable again, but +// now without an associated finalizer. Assuming that SetFinalizer +// is not called again, the next time the garbage collector sees +// that x is unreachable, it will free x. +// +// SetFinalizer(x, nil) clears any finalizer associated with x. +// +// The argument x must be a pointer to an object allocated by +// calling new or by taking the address of a composite literal. +// The argument f must be a function that takes a single argument +// to which x's type can be assigned, and can have arbitrary ignored return +// values. If either of these is not true, SetFinalizer aborts the +// program. +// +// Finalizers are run in dependency order: if A points at B, both have +// finalizers, and they are otherwise unreachable, only the finalizer +// for A runs; once A is freed, the finalizer for B can run. 
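+// An illustrative case: with a := &A{b: new(B)} and finalizers set on
+// both a and a.b, once both become unreachable only a's finalizer runs
+// at first; after a is freed, a.b's finalizer becomes eligible on a
+// later collection.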
+// If a cyclic structure includes a block with a finalizer, that +// cycle is not guaranteed to be garbage collected and the finalizer +// is not guaranteed to run, because there is no ordering that +// respects the dependencies. +// +// The finalizer for x is scheduled to run at some arbitrary time after +// x becomes unreachable. +// There is no guarantee that finalizers will run before a program exits, +// so typically they are useful only for releasing non-memory resources +// associated with an object during a long-running program. +// For example, an os.File object could use a finalizer to close the +// associated operating system file descriptor when a program discards +// an os.File without calling Close, but it would be a mistake +// to depend on a finalizer to flush an in-memory I/O buffer such as a +// bufio.Writer, because the buffer would not be flushed at program exit. +// +// It is not guaranteed that a finalizer will run if the size of *x is +// zero bytes. +// +// It is not guaranteed that a finalizer will run for objects allocated +// in initializers for package-level variables. Such objects may be +// linker-allocated, not heap-allocated. +// +// A single goroutine runs all finalizers for a program, sequentially. +// If a finalizer must run for a long time, it should do so by starting +// a new goroutine. +func SetFinalizer(obj interface{}, finalizer interface{}) { + e := (*eface)(unsafe.Pointer(&obj)) + etyp := e._type + if etyp == nil { + gothrow("runtime.SetFinalizer: first argument is nil") + } + if etyp.kind&kindMask != kindPtr { + gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer") + } + ot := (*ptrtype)(unsafe.Pointer(etyp)) + if ot.elem == nil { + gothrow("nil elem type!") + } + + // find the containing object + _, base, _ := findObject(e.data) + + if base == nil { + // 0-length objects are okay. + if e.data == unsafe.Pointer(&zerobase) { + return + } + + // Global initializers might be linker-allocated. + // var Foo = &Object{} + // func main() { + // runtime.SetFinalizer(Foo, nil) + // } + // The relevant segments are: noptrdata, data, bss, noptrbss. + // We cannot assume they are in any order or even contiguous, + // due to external linking. + if uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrdata)) || + uintptr(unsafe.Pointer(&data)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&edata)) || + uintptr(unsafe.Pointer(&bss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&ebss)) || + uintptr(unsafe.Pointer(&noptrbss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) { + return + } + gothrow("runtime.SetFinalizer: pointer not in allocated block") + } + + if e.data != base { + // As an implementation detail we allow to set finalizers for an inner byte + // of an object if it could come from tiny alloc (see mallocgc for details). 
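+ // The check below tolerates the inner pointer only when the
+ // object is pointer-free and smaller than maxTinySize, i.e. the
+ // only objects the tiny allocator could have produced.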
+ if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize { + gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block") + } + } + + f := (*eface)(unsafe.Pointer(&finalizer)) + ftyp := f._type + if ftyp == nil { + // switch to M stack and remove finalizer + mp := acquirem() + mp.ptrarg[0] = e.data + onM(removeFinalizer_m) + releasem(mp) + return + } + + if ftyp.kind&kindMask != kindFunc { + gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function") + } + ft := (*functype)(unsafe.Pointer(ftyp)) + ins := *(*[]*_type)(unsafe.Pointer(&ft.in)) + if ft.dotdotdot || len(ins) != 1 { + gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string) + } + fint := ins[0] + switch { + case fint == etyp: + // ok - same type + goto okarg + case fint.kind&kindMask == kindPtr: + if (fint.x == nil || fint.x.name == nil || etyp.x == nil || etyp.x.name == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem { + // ok - not same type, but both pointers, + // one or the other is unnamed, and same element type, so assignable. + goto okarg + } + case fint.kind&kindMask == kindInterface: + ityp := (*interfacetype)(unsafe.Pointer(fint)) + if len(ityp.mhdr) == 0 { + // ok - satisfies empty interface + goto okarg + } + if _, ok := assertE2I2(ityp, obj); ok { + goto okarg + } + } + gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string) +okarg: + // compute size needed for return parameters + nret := uintptr(0) + for _, t := range *(*[]*_type)(unsafe.Pointer(&ft.out)) { + nret = round(nret, uintptr(t.align)) + uintptr(t.size) + } + nret = round(nret, ptrSize) + + // make sure we have a finalizer goroutine + createfing() + + // switch to M stack to add finalizer record + mp := acquirem() + mp.ptrarg[0] = f.data + mp.ptrarg[1] = e.data + mp.scalararg[0] = nret + mp.ptrarg[2] = unsafe.Pointer(fint) + mp.ptrarg[3] = unsafe.Pointer(ot) + onM(setFinalizer_m) + if mp.scalararg[0] != 1 { + gothrow("runtime.SetFinalizer: finalizer already set") + } + releasem(mp) +} + +// round n up to a multiple of a. a must be a power of 2. +func round(n, a uintptr) uintptr { + return (n + a - 1) &^ (a - 1) +} + +// Look up pointer v in heap. Return the span containing the object, +// the start of the object, and the size of the object. If the object +// does not exist, return nil, nil, 0. 
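+// For example (hypothetical caller), given any pointer p into a heap
+// object,
+//	s, base, n := findObject(p)
+// yields the object's span, its start address, and its size-class
+// rounded size.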
+func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) { + c := gomcache() + c.local_nlookup++ + if ptrSize == 4 && c.local_nlookup >= 1<<30 { + // purge cache stats to prevent overflow + lock(&mheap_.lock) + purgecachedstats(c) + unlock(&mheap_.lock) + } + + // find span + arena_start := uintptr(unsafe.Pointer(mheap_.arena_start)) + arena_used := uintptr(unsafe.Pointer(mheap_.arena_used)) + if uintptr(v) < arena_start || uintptr(v) >= arena_used { + return + } + p := uintptr(v) >> pageShift + q := p - arena_start>>pageShift + s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize)) + if s == nil { + return + } + x = unsafe.Pointer(uintptr(s.start) << pageShift) + + if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse { + s = nil + x = nil + return + } + + n = uintptr(s.elemsize) + if s.sizeclass != 0 { + x = add(x, (uintptr(v)-uintptr(x))/n*n) + } + return +} + +var fingCreate uint32 + +func createfing() { + // start the finalizer goroutine exactly once + if fingCreate == 0 && cas(&fingCreate, 0, 1) { + go runfinq() + } +} + +// This is the goroutine that runs all of the finalizers +func runfinq() { + var ( + frame unsafe.Pointer + framecap uintptr + ) + + for { + lock(&finlock) + fb := finq + finq = nil + if fb == nil { + gp := getg() + fing = gp + fingwait = true + gp.issystem = true + goparkunlock(&finlock, "finalizer wait") + gp.issystem = false + continue + } + unlock(&finlock) + if raceenabled { + racefingo() + } + for fb != nil { + for i := int32(0); i < fb.cnt; i++ { + f := (*finalizer)(add(unsafe.Pointer(&fb.fin), uintptr(i)*unsafe.Sizeof(finalizer{}))) + + framesz := unsafe.Sizeof((interface{})(nil)) + uintptr(f.nret) + if framecap < framesz { + // The frame does not contain pointers interesting for GC, + // all not yet finalized objects are stored in finq. + // If we do not mark it as FlagNoScan, + // the last finalized object is not collected. + frame = mallocgc(framesz, nil, flagNoScan) + framecap = framesz + } + + if f.fint == nil { + gothrow("missing type in runfinq") + } + switch f.fint.kind & kindMask { + case kindPtr: + // direct use of pointer + *(*unsafe.Pointer)(frame) = f.arg + case kindInterface: + ityp := (*interfacetype)(unsafe.Pointer(f.fint)) + // set up with empty interface + (*eface)(frame)._type = &f.ot.typ + (*eface)(frame).data = f.arg + if len(ityp.mhdr) != 0 { + // convert to interface with methods + // this conversion is guaranteed to succeed - we checked in SetFinalizer + *(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame)) + } + default: + gothrow("bad kind in runfinq") + } + reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz)) + + // drop finalizer queue references to finalized object + f.fn = nil + f.arg = nil + f.ot = nil + } + fb.cnt = 0 + next := fb.next + lock(&finlock) + fb.next = finc + finc = fb + unlock(&finlock) + fb = next + } + } +} + +var persistent struct { + lock mutex + pos unsafe.Pointer + end unsafe.Pointer +} + +// Wrapper around sysAlloc that can allocate small chunks. +// There is no associated free operation. +// Intended for things like function/type/debug-related persistent data. +// If align is 0, uses default align (currently 8). 
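+// A minimal usage sketch (someHeaderT is an illustrative name):
+//	p := persistentalloc(unsafe.Sizeof(someHeaderT{}), 0, &memstats.other_sys)
+// hands back 8-byte-aligned, never-freed memory charged to the given
+// stat.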
+func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer { + const ( + chunk = 256 << 10 + maxBlock = 64 << 10 // VM reservation granularity is 64K on windows + ) + + if align != 0 { + if align&(align-1) != 0 { + gothrow("persistentalloc: align is not a power of 2") + } + if align > _PageSize { + gothrow("persistentalloc: align is too large") + } + } else { + align = 8 + } + + if size >= maxBlock { + return sysAlloc(size, stat) + } + + lock(&persistent.lock) + persistent.pos = roundup(persistent.pos, align) + if uintptr(persistent.pos)+size > uintptr(persistent.end) { + persistent.pos = sysAlloc(chunk, &memstats.other_sys) + if persistent.pos == nil { + unlock(&persistent.lock) + gothrow("runtime: cannot allocate memory") + } + persistent.end = add(persistent.pos, chunk) + } + p := persistent.pos + persistent.pos = add(persistent.pos, size) + unlock(&persistent.lock) + + if stat != &memstats.other_sys { + xadd64(stat, int64(size)) + xadd64(&memstats.other_sys, -int64(size)) + } + return p +} diff --git a/libgo/go/runtime/malloc1.go b/libgo/go/runtime/malloc1.go deleted file mode 100644 index da92f4c..0000000 --- a/libgo/go/runtime/malloc1.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// trivial malloc test - -package main - -import ( - "flag" - "fmt" - "runtime" -) - -var chatty = flag.Bool("v", false, "chatty") - -func main() { - memstats := new(runtime.MemStats) - runtime.Free(runtime.Alloc(1)) - runtime.ReadMemStats(memstats) - if *chatty { - fmt.Printf("%+v %v\n", memstats, uint64(0)) - } -} diff --git a/libgo/go/runtime/mallocrand.go b/libgo/go/runtime/mallocrand.go deleted file mode 100644 index f1bcb89..0000000 --- a/libgo/go/runtime/mallocrand.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Random malloc test. - -package main - -import ( - "flag" - "math/rand" - "runtime" - "unsafe" -) - -var chatty = flag.Bool("v", false, "chatty") - -var footprint uint64 -var allocated uint64 - -func bigger() { - memstats := new(runtime.MemStats) - runtime.ReadMemStats(memstats) - if f := memstats.Sys; footprint < f { - footprint = f - if *chatty { - println("Footprint", footprint, " for ", allocated) - } - if footprint > 1e9 { - println("too big") - panic("fail") - } - } -} - -// Prime the data structures by allocating one of -// each block in order. After this, there should be -// little reason to ask for more memory from the OS. 
-func prime() { - for i := 0; i < 16; i++ { - b := runtime.Alloc(1 << uint(i)) - runtime.Free(b) - } - for i := uintptr(0); i < 256; i++ { - b := runtime.Alloc(i << 12) - runtime.Free(b) - } -} - -func memset(b *byte, c byte, n uintptr) { - np := uintptr(n) - for i := uintptr(0); i < np; i++ { - *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(b)) + i)) = c - } -} - -func main() { - flag.Parse() - // prime() - var blocks [1]struct { - base *byte - siz uintptr - } - for i := 0; i < 1<<10; i++ { - if i%(1<<10) == 0 && *chatty { - println(i) - } - b := rand.Int() % len(blocks) - if blocks[b].base != nil { - // println("Free", blocks[b].siz, blocks[b].base) - runtime.Free(blocks[b].base) - blocks[b].base = nil - allocated -= uint64(blocks[b].siz) - continue - } - siz := uintptr(rand.Int() >> (11 + rand.Uint32()%20)) - base := runtime.Alloc(siz) - // ptr := uintptr(syscall.BytePtr(base))+uintptr(siz/2) - // obj, size, ref, ok := allocator.find(ptr) - // if obj != base || *ref != 0 || !ok { - // println("find", siz, obj, ref, ok) - // panic("fail") - // } - blocks[b].base = base - blocks[b].siz = siz - allocated += uint64(siz) - // println("Alloc", siz, base) - memset(base, 0xbb, siz) - bigger() - } -} diff --git a/libgo/go/runtime/mallocrep.go b/libgo/go/runtime/mallocrep.go deleted file mode 100644 index 03ee71e..0000000 --- a/libgo/go/runtime/mallocrep.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Repeated malloc test. - -// +build ignore - -package main - -import ( - "flag" - "runtime" -) - -var chatty = flag.Bool("v", false, "chatty") - -var oldsys uint64 -var memstats runtime.MemStats - -func bigger() { - st := &memstats - runtime.ReadMemStats(st) - if oldsys < st.Sys { - oldsys = st.Sys - if *chatty { - println(st.Sys, " system bytes for ", st.Alloc, " Go bytes") - } - if st.Sys > 1e9 { - println("too big") - panic("fail") - } - } -} - -func main() { - runtime.GC() // clean up garbage from init - runtime.ReadMemStats(&memstats) // first call can do some allocations - runtime.MemProfileRate = 0 // disable profiler - stacks := memstats.Alloc // ignore stacks - flag.Parse() - for i := 0; i < 1<<7; i++ { - for j := 1; j <= 1<<22; j <<= 1 { - if i == 0 && *chatty { - println("First alloc:", j) - } - if a := memstats.Alloc - stacks; a != 0 { - println("no allocations but stats report", a, "bytes allocated") - panic("fail") - } - b := runtime.Alloc(uintptr(j)) - runtime.ReadMemStats(&memstats) - during := memstats.Alloc - stacks - runtime.Free(b) - runtime.ReadMemStats(&memstats) - if a := memstats.Alloc - stacks; a != 0 { - println("allocated ", j, ": wrong stats: during=", during, " after=", a, " (want 0)") - panic("fail") - } - bigger() - } - if i%(1<<10) == 0 && *chatty { - println(i) - } - if i == 0 { - if *chatty { - println("Primed", i) - } - // runtime.frozen = true - } - } -} diff --git a/libgo/go/runtime/mallocrep1.go b/libgo/go/runtime/mallocrep1.go deleted file mode 100644 index bc33e3a..0000000 --- a/libgo/go/runtime/mallocrep1.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Repeated malloc test. 
- -package main - -import ( - "flag" - "fmt" - "runtime" - "strconv" -) - -var chatty = flag.Bool("v", false, "chatty") -var reverse = flag.Bool("r", false, "reverse") -var longtest = flag.Bool("l", false, "long test") - -var b []*byte -var stats = new(runtime.MemStats) - -func OkAmount(size, n uintptr) bool { - if n < size { - return false - } - if size < 16*8 { - if n > size+16 { - return false - } - } else { - if n > size*9/8 { - return false - } - } - return true -} - -func AllocAndFree(size, count int) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) - if *chatty { - fmt.Printf("size=%d count=%d ...\n", size, count) - } - runtime.ReadMemStats(stats) - n1 := stats.Alloc - for i := 0; i < count; i++ { - b[i] = runtime.Alloc(uintptr(size)) - base, n := runtime.Lookup(b[i]) - if base != b[i] || !OkAmount(uintptr(size), n) { - println("lookup failed: got", base, n, "for", b[i]) - panic("fail") - } - runtime.ReadMemStats(stats) - if stats.Sys > 1e9 { - println("too much memory allocated") - panic("fail") - } - } - runtime.ReadMemStats(stats) - n2 := stats.Alloc - if *chatty { - fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats) - } - n3 := stats.Alloc - for j := 0; j < count; j++ { - i := j - if *reverse { - i = count - 1 - j - } - alloc := uintptr(stats.Alloc) - base, n := runtime.Lookup(b[i]) - if base != b[i] || !OkAmount(uintptr(size), n) { - println("lookup failed: got", base, n, "for", b[i]) - panic("fail") - } - runtime.Free(b[i]) - runtime.ReadMemStats(stats) - if stats.Alloc != uint64(alloc-n) { - println("free alloc got", stats.Alloc, "expected", alloc-n, "after free of", n) - panic("fail") - } - if stats.Sys > 1e9 { - println("too much memory allocated") - panic("fail") - } - } - runtime.ReadMemStats(stats) - n4 := stats.Alloc - - if *chatty { - fmt.Printf("size=%d count=%d stats=%+v\n", size, count, *stats) - } - if n2-n1 != n3-n4 { - println("wrong alloc count: ", n2-n1, n3-n4) - panic("fail") - } -} - -func atoi(s string) int { - i, _ := strconv.Atoi(s) - return i -} - -func main() { - runtime.MemProfileRate = 0 // disable profiler - flag.Parse() - b = make([]*byte, 10000) - if flag.NArg() > 0 { - AllocAndFree(atoi(flag.Arg(0)), atoi(flag.Arg(1))) - return - } - maxb := 1 << 22 - if !*longtest { - maxb = 1 << 19 - } - for j := 1; j <= maxb; j <<= 1 { - n := len(b) - max := uintptr(1 << 28) - if !*longtest { - max = uintptr(maxb) - } - if uintptr(j)*uintptr(n) > max { - n = int(max / uintptr(j)) - } - if n < 10 { - n = 10 - } - for m := 1; m <= n; { - AllocAndFree(j, m) - if m == n { - break - } - m = 5 * m / 4 - if m < 4 { - m++ - } - if m > n { - m = n - } - } - } -} diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go index d969025..7e4da90 100644 --- a/libgo/go/runtime/map_test.go +++ b/libgo/go/runtime/map_test.go @@ -260,7 +260,7 @@ func testConcurrentReadsAfterGrowth(t *testing.T, useReflect bool) { for nr := 0; nr < numReader; nr++ { go func() { defer wg.Done() - for _ = range m { + for range m { } }() go func() { @@ -423,30 +423,72 @@ func TestMapIterOrder(t *testing.T) { } for _, n := range [...]int{3, 7, 9, 15} { - // Make m be {0: true, 1: true, ..., n-1: true}. + for i := 0; i < 1000; i++ { + // Make m be {0: true, 1: true, ..., n-1: true}. + m := make(map[int]bool) + for i := 0; i < n; i++ { + m[i] = true + } + // Check that iterating over the map produces at least two different orderings. 
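+ // (Iteration order over Go maps is deliberately randomized, so
+ // two orderings almost always differ within a few tries.)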
+ ord := func() []int { + var s []int + for key := range m { + s = append(s, key) + } + return s + } + first := ord() + ok := false + for try := 0; try < 100; try++ { + if !reflect.DeepEqual(first, ord()) { + ok = true + break + } + } + if !ok { + t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) + break + } + } + } +} + +// Issue 8410 +func TestMapSparseIterOrder(t *testing.T) { + // Run several rounds to increase the probability + // of failure. One is not enough. + if runtime.Compiler == "gccgo" { + t.Skip("skipping for gccgo") + } +NextRound: + for round := 0; round < 10; round++ { m := make(map[int]bool) - for i := 0; i < n; i++ { + // Add 1000 items, remove 980. + for i := 0; i < 1000; i++ { m[i] = true } - // Check that iterating over the map produces at least two different orderings. - ord := func() []int { - var s []int - for key := range m { - s = append(s, key) - } - return s + for i := 20; i < 1000; i++ { + delete(m, i) } - first := ord() - ok := false - for try := 0; try < 100; try++ { - if !reflect.DeepEqual(first, ord()) { - ok = true - break - } + + var first []int + for i := range m { + first = append(first, i) } - if !ok { - t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first) + + // 800 chances to get a different iteration order. + // See bug 8736 for why we need so many tries. + for n := 0; n < 800; n++ { + idx := 0 + for i := range m { + if i != first[idx] { + // iteration order changed. + continue NextRound + } + idx++ + } } + t.Fatalf("constant iteration order on round %d: %v", round, first) } } @@ -489,3 +531,24 @@ func TestMapStringBytesLookup(t *testing.T) { t.Errorf("AllocsPerRun for x,ok = m[string(buf)] = %v, want 0", n) } } + +func benchmarkMapPop(b *testing.B, n int) { + m := map[int]int{} + for i := 0; i < b.N; i++ { + for j := 0; j < n; j++ { + m[j] = j + } + for j := 0; j < n; j++ { + // Use iterator to pop an element. + // We want this to be fast, see issue 8412. 
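+ // (Ranging over a non-empty map yields some arbitrary key first;
+ // breaking immediately makes range-delete-break an
+ // arbitrary-element pop.)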
+ for k := range m { + delete(m, k) + break + } + } + } +} + +func BenchmarkMapPop100(b *testing.B) { benchmarkMapPop(b, 100) } +func BenchmarkMapPop1000(b *testing.B) { benchmarkMapPop(b, 1000) } +func BenchmarkMapPop10000(b *testing.B) { benchmarkMapPop(b, 10000) } diff --git a/libgo/go/runtime/mapspeed_test.go b/libgo/go/runtime/mapspeed_test.go index da45ea1..119eb3f 100644 --- a/libgo/go/runtime/mapspeed_test.go +++ b/libgo/go/runtime/mapspeed_test.go @@ -241,7 +241,7 @@ func BenchmarkMapIter(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - for _, _ = range m { + for range m { } } } @@ -250,7 +250,7 @@ func BenchmarkMapIterEmpty(b *testing.B) { m := make(map[int]bool) b.ResetTimer() for i := 0; i < b.N; i++ { - for _, _ = range m { + for range m { } } } diff --git a/libgo/go/runtime/memmove_test.go b/libgo/go/runtime/memmove_test.go index 540f0fe..ffda4fe 100644 --- a/libgo/go/runtime/memmove_test.go +++ b/libgo/go/runtime/memmove_test.go @@ -162,43 +162,95 @@ func BenchmarkMemclr256(b *testing.B) { bmMemclr(b, 256) } func BenchmarkMemclr4096(b *testing.B) { bmMemclr(b, 4096) } func BenchmarkMemclr65536(b *testing.B) { bmMemclr(b, 65536) } +func BenchmarkClearFat8(b *testing.B) { + for i := 0; i < b.N; i++ { + var x [8 / 4]uint32 + _ = x + } +} +func BenchmarkClearFat12(b *testing.B) { + for i := 0; i < b.N; i++ { + var x [12 / 4]uint32 + _ = x + } +} +func BenchmarkClearFat16(b *testing.B) { + for i := 0; i < b.N; i++ { + var x [16 / 4]uint32 + _ = x + } +} +func BenchmarkClearFat24(b *testing.B) { + for i := 0; i < b.N; i++ { + var x [24 / 4]uint32 + _ = x + } +} func BenchmarkClearFat32(b *testing.B) { for i := 0; i < b.N; i++ { - var x [32]byte + var x [32 / 4]uint32 _ = x } } func BenchmarkClearFat64(b *testing.B) { for i := 0; i < b.N; i++ { - var x [64]byte + var x [64 / 4]uint32 _ = x } } func BenchmarkClearFat128(b *testing.B) { for i := 0; i < b.N; i++ { - var x [128]byte + var x [128 / 4]uint32 _ = x } } func BenchmarkClearFat256(b *testing.B) { for i := 0; i < b.N; i++ { - var x [256]byte + var x [256 / 4]uint32 _ = x } } func BenchmarkClearFat512(b *testing.B) { for i := 0; i < b.N; i++ { - var x [512]byte + var x [512 / 4]uint32 _ = x } } func BenchmarkClearFat1024(b *testing.B) { for i := 0; i < b.N; i++ { - var x [1024]byte + var x [1024 / 4]uint32 _ = x } } +func BenchmarkCopyFat8(b *testing.B) { + var x [8 / 4]uint32 + for i := 0; i < b.N; i++ { + y := x + _ = y + } +} +func BenchmarkCopyFat12(b *testing.B) { + var x [12 / 4]uint32 + for i := 0; i < b.N; i++ { + y := x + _ = y + } +} +func BenchmarkCopyFat16(b *testing.B) { + var x [16 / 4]uint32 + for i := 0; i < b.N; i++ { + y := x + _ = y + } +} +func BenchmarkCopyFat24(b *testing.B) { + var x [24 / 4]uint32 + for i := 0; i < b.N; i++ { + y := x + _ = y + } +} func BenchmarkCopyFat32(b *testing.B) { var x [32 / 4]uint32 for i := 0; i < b.N; i++ { diff --git a/libgo/go/runtime/mfinal_test.go b/libgo/go/runtime/mfinal_test.go index b47f83c..c51bfc6 100644 --- a/libgo/go/runtime/mfinal_test.go +++ b/libgo/go/runtime/mfinal_test.go @@ -44,10 +44,17 @@ func TestFinalizerType(t *testing.T) { {func(x *int) interface{} { return (*Tint)(x) }, func(v Tinter) { finalize((*int)(v.(*Tint))) }}, } - for _, tt := range finalizerTests { + for i, tt := range finalizerTests { done := make(chan bool, 1) go func() { - v := new(int) + // allocate struct with pointer to avoid hitting tinyalloc. + // Otherwise we can't be sure when the allocation will + // be freed. 
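+ // (The tiny allocator only packs pointer-free objects, so the
+ // unsafe.Pointer field below forces v's block to be tracked,
+ // and finalized, on its own.)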
+ type T struct { + v int + p unsafe.Pointer + } + v := &new(T).v *v = 97531 runtime.SetFinalizer(tt.convert(v), tt.finalizer) v = nil @@ -58,7 +65,7 @@ func TestFinalizerType(t *testing.T) { select { case <-ch: case <-time.After(time.Second * 4): - t.Errorf("finalizer for type %T didn't run", tt.finalizer) + t.Errorf("#%d: finalizer for type %T didn't run", i, tt.finalizer) } } } diff --git a/libgo/go/runtime/mgc0.go b/libgo/go/runtime/mgc0.go index 624485d..cbf5e9c 100644 --- a/libgo/go/runtime/mgc0.go +++ b/libgo/go/runtime/mgc0.go @@ -4,6 +4,8 @@ package runtime +import "unsafe" + // Called from C. Returns the Go type *m. func gc_m_ptr(ret *interface{}) { *ret = (*m)(nil) @@ -19,9 +21,132 @@ func gc_itab_ptr(ret *interface{}) { *ret = (*itab)(nil) } -func timenow() (sec int64, nsec int32) - func gc_unixnanotime(now *int64) { sec, nsec := timenow() *now = sec*1e9 + int64(nsec) } + +func freeOSMemory() { + gogc(2) // force GC and do eager sweep + onM(scavenge_m) +} + +var poolcleanup func() + +func registerPoolCleanup(f func()) { + poolcleanup = f +} + +func clearpools() { + // clear sync.Pools + if poolcleanup != nil { + poolcleanup() + } + + for _, p := range &allp { + if p == nil { + break + } + // clear tinyalloc pool + if c := p.mcache; c != nil { + c.tiny = nil + c.tinysize = 0 + + // disconnect cached list before dropping it on the floor, + // so that a dangling ref to one entry does not pin all of them. + var sg, sgnext *sudog + for sg = c.sudogcache; sg != nil; sg = sgnext { + sgnext = sg.next + sg.next = nil + } + c.sudogcache = nil + } + + // clear defer pools + for i := range p.deferpool { + // disconnect cached list before dropping it on the floor, + // so that a dangling ref to one entry does not pin all of them. + var d, dlink *_defer + for d = p.deferpool[i]; d != nil; d = dlink { + dlink = d.link + d.link = nil + } + p.deferpool[i] = nil + } + } +} + +func gosweepone() uintptr +func gosweepdone() bool + +func bgsweep() { + getg().issystem = true + for { + for gosweepone() != ^uintptr(0) { + sweep.nbgsweep++ + Gosched() + } + lock(&gclock) + if !gosweepdone() { + // This can happen if a GC runs between + // gosweepone returning ^0 above + // and the lock being acquired. + unlock(&gclock) + continue + } + sweep.parked = true + goparkunlock(&gclock, "GC sweep wait") + } +} + +// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer, +// but if we do that, Go inserts a write barrier on *dst = src. 
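+// In this gccgo runtime the barrier is just the store itself; the
+// uintptr signature keeps the compiler from inserting a (recursive)
+// write barrier for the assignment in the body.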
+//go:nosplit +func writebarrierptr(dst *uintptr, src uintptr) { + *dst = src +} + +//go:nosplit +func writebarrierstring(dst *[2]uintptr, src [2]uintptr) { + dst[0] = src[0] + dst[1] = src[1] +} + +//go:nosplit +func writebarrierslice(dst *[3]uintptr, src [3]uintptr) { + dst[0] = src[0] + dst[1] = src[1] + dst[2] = src[2] +} + +//go:nosplit +func writebarrieriface(dst *[2]uintptr, src [2]uintptr) { + dst[0] = src[0] + dst[1] = src[1] +} + +//go:nosplit +func writebarrierfat2(dst *[2]uintptr, _ *byte, src [2]uintptr) { + dst[0] = src[0] + dst[1] = src[1] +} + +//go:nosplit +func writebarrierfat3(dst *[3]uintptr, _ *byte, src [3]uintptr) { + dst[0] = src[0] + dst[1] = src[1] + dst[2] = src[2] +} + +//go:nosplit +func writebarrierfat4(dst *[4]uintptr, _ *byte, src [4]uintptr) { + dst[0] = src[0] + dst[1] = src[1] + dst[2] = src[2] + dst[3] = src[3] +} + +//go:nosplit +func writebarrierfat(typ *_type, dst, src unsafe.Pointer) { + memmove(dst, src, typ.size) +} diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go new file mode 100644 index 0000000..d409c6c --- /dev/null +++ b/libgo/go/runtime/mprof.go @@ -0,0 +1,672 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Malloc profiling. +// Patterned after tcmalloc's algorithms; shorter code. + +package runtime + +import ( + "unsafe" +) + +// NOTE(rsc): Everything here could use cas if contention became an issue. +var proflock mutex + +// All memory allocations are local and do not escape outside of the profiler. +// The profiler is forbidden from referring to garbage-collected memory. + +const ( + // profile types + memProfile bucketType = 1 + iota + blockProfile + + // size of bucket hash table + buckHashSize = 179999 + + // max depth of stack to record in bucket + maxStack = 32 +) + +type bucketType int + +// A bucket holds per-call-stack profiling information. +// The representation is a bit sleazy, inherited from C. +// This struct defines the bucket header. It is followed in +// memory by the stack words and then the actual record +// data, either a memRecord or a blockRecord. +// +// Per-call-stack profiling information. +// Lookup by hashing call stack into a linked-list hash table. +type bucket struct { + next *bucket + allnext *bucket + typ bucketType // memBucket or blockBucket + hash uintptr + size uintptr + nstk uintptr +} + +// A memRecord is the bucket data for a bucket of type memProfile, +// part of the memory profile. +type memRecord struct { + // The following complex 3-stage scheme of stats accumulation + // is required to obtain a consistent picture of mallocs and frees + // for some point in time. + // The problem is that mallocs come in real time, while frees + // come only after a GC during concurrent sweeping. So if we would + // naively count them, we would get a skew toward mallocs. + // + // Mallocs are accounted in recent stats. + // Explicit frees are accounted in recent stats. + // GC frees are accounted in prev stats. + // After GC prev stats are added to final stats and + // recent stats are moved into prev stats. 
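+ // Timeline sketch (illustrative): a malloc in cycle N counts in
+ // recent_*; the GC ending cycle N shifts recent_* into prev_*;
+ // frees found while sweeping cycle N are credited to prev_*; the
+ // next GC folds prev_* into allocs/frees, so mallocs and the
+ // frees that match them land in the snapshot together.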
+ allocs uintptr + frees uintptr + alloc_bytes uintptr + free_bytes uintptr + + // changes between next-to-last GC and last GC + prev_allocs uintptr + prev_frees uintptr + prev_alloc_bytes uintptr + prev_free_bytes uintptr + + // changes since last GC + recent_allocs uintptr + recent_frees uintptr + recent_alloc_bytes uintptr + recent_free_bytes uintptr +} + +// A blockRecord is the bucket data for a bucket of type blockProfile, +// part of the blocking profile. +type blockRecord struct { + count int64 + cycles int64 +} + +var ( + mbuckets *bucket // memory profile buckets + bbuckets *bucket // blocking profile buckets + buckhash *[179999]*bucket + bucketmem uintptr +) + +// newBucket allocates a bucket with the given type and number of stack entries. +func newBucket(typ bucketType, nstk int) *bucket { + size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0)) + switch typ { + default: + gothrow("invalid profile bucket type") + case memProfile: + size += unsafe.Sizeof(memRecord{}) + case blockProfile: + size += unsafe.Sizeof(blockRecord{}) + } + + b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys)) + bucketmem += size + b.typ = typ + b.nstk = uintptr(nstk) + return b +} + +// stk returns the slice in b holding the stack. +func (b *bucket) stk() []uintptr { + stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b))) + return stk[:b.nstk:b.nstk] +} + +// mp returns the memRecord associated with the memProfile bucket b. +func (b *bucket) mp() *memRecord { + if b.typ != memProfile { + gothrow("bad use of bucket.mp") + } + data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0))) + return (*memRecord)(data) +} + +// bp returns the blockRecord associated with the blockProfile bucket b. +func (b *bucket) bp() *blockRecord { + if b.typ != blockProfile { + gothrow("bad use of bucket.bp") + } + data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0))) + return (*blockRecord)(data) +} + +// Return the bucket for stk[0:nstk], allocating new bucket if needed. +func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket { + if buckhash == nil { + buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys)) + if buckhash == nil { + gothrow("runtime: cannot allocate memory") + } + } + + // Hash stack. + var h uintptr + for _, pc := range stk { + h += pc + h += h << 10 + h ^= h >> 6 + } + // hash in size + h += size + h += h << 10 + h ^= h >> 6 + // finalize + h += h << 3 + h ^= h >> 11 + + i := int(h % buckHashSize) + for b := buckhash[i]; b != nil; b = b.next { + if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) { + return b + } + } + + if !alloc { + return nil + } + + // Create new bucket. 
+ b := newBucket(typ, len(stk)) + copy(b.stk(), stk) + b.hash = h + b.size = size + b.next = buckhash[i] + buckhash[i] = b + if typ == memProfile { + b.allnext = mbuckets + mbuckets = b + } else { + b.allnext = bbuckets + bbuckets = b + } + return b +} + +func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer + +func eqslice(x, y []uintptr) bool { + if len(x) != len(y) { + return false + } + for i, xi := range x { + if xi != y[i] { + return false + } + } + return true +} + +func mprof_GC() { + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + mp.allocs += mp.prev_allocs + mp.frees += mp.prev_frees + mp.alloc_bytes += mp.prev_alloc_bytes + mp.free_bytes += mp.prev_free_bytes + + mp.prev_allocs = mp.recent_allocs + mp.prev_frees = mp.recent_frees + mp.prev_alloc_bytes = mp.recent_alloc_bytes + mp.prev_free_bytes = mp.recent_free_bytes + + mp.recent_allocs = 0 + mp.recent_frees = 0 + mp.recent_alloc_bytes = 0 + mp.recent_free_bytes = 0 + } +} + +// Record that a gc just happened: all the 'recent' statistics are now real. +func mProf_GC() { + lock(&proflock) + mprof_GC() + unlock(&proflock) +} + +// Called by malloc to record a profiled block. +func mProf_Malloc(p unsafe.Pointer, size uintptr) { + var stk [maxStack]uintptr + nstk := callers(4, &stk[0], len(stk)) + lock(&proflock) + b := stkbucket(memProfile, size, stk[:nstk], true) + mp := b.mp() + mp.recent_allocs++ + mp.recent_alloc_bytes += size + unlock(&proflock) + + // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock. + // This reduces potential contention and chances of deadlocks. + // Since the object must be alive during call to mProf_Malloc, + // it's fine to do this non-atomically. + setprofilebucket(p, b) +} + +func setprofilebucket_m() // mheap.c + +func setprofilebucket(p unsafe.Pointer, b *bucket) { + g := getg() + g.m.ptrarg[0] = p + g.m.ptrarg[1] = unsafe.Pointer(b) + onM(setprofilebucket_m) +} + +// Called when freeing a profiled block. +func mProf_Free(b *bucket, size uintptr, freed bool) { + lock(&proflock) + mp := b.mp() + if freed { + mp.recent_frees++ + mp.recent_free_bytes += size + } else { + mp.prev_frees++ + mp.prev_free_bytes += size + } + unlock(&proflock) +} + +var blockprofilerate uint64 // in CPU ticks + +// SetBlockProfileRate controls the fraction of goroutine blocking events +// that are reported in the blocking profile. The profiler aims to sample +// an average of one blocking event per rate nanoseconds spent blocked. +// +// To include every blocking event in the profile, pass rate = 1. +// To turn off profiling entirely, pass rate <= 0. 
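+//
+// For example, SetBlockProfileRate(1) records every blocking event,
+// while SetBlockProfileRate(0) disables the profile.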
+func SetBlockProfileRate(rate int) { + var r int64 + if rate <= 0 { + r = 0 // disable profiling + } else if rate == 1 { + r = 1 // profile everything + } else { + // convert ns to cycles, use float64 to prevent overflow during multiplication + r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000)) + if r == 0 { + r = 1 + } + } + + atomicstore64(&blockprofilerate, uint64(r)) +} + +func blockevent(cycles int64, skip int) { + if cycles <= 0 { + cycles = 1 + } + rate := int64(atomicload64(&blockprofilerate)) + if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) { + return + } + gp := getg() + var nstk int + var stk [maxStack]uintptr + if gp.m.curg == nil || gp.m.curg == gp { + nstk = callers(skip, &stk[0], len(stk)) + } else { + nstk = gcallers(gp.m.curg, skip, &stk[0], len(stk)) + } + lock(&proflock) + b := stkbucket(blockProfile, 0, stk[:nstk], true) + b.bp().count++ + b.bp().cycles += cycles + unlock(&proflock) +} + +// Go interface to profile data. + +// A StackRecord describes a single execution stack. +type StackRecord struct { + Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry +} + +// Stack returns the stack trace associated with the record, +// a prefix of r.Stack0. +func (r *StackRecord) Stack() []uintptr { + for i, v := range r.Stack0 { + if v == 0 { + return r.Stack0[0:i] + } + } + return r.Stack0[0:] +} + +// MemProfileRate controls the fraction of memory allocations +// that are recorded and reported in the memory profile. +// The profiler aims to sample an average of +// one allocation per MemProfileRate bytes allocated. +// +// To include every allocated block in the profile, set MemProfileRate to 1. +// To turn off profiling entirely, set MemProfileRate to 0. +// +// The tools that process the memory profiles assume that the +// profile rate is constant across the lifetime of the program +// and equal to the current value. Programs that change the +// memory profiling rate should do so just once, as early as +// possible in the execution of the program (for example, +// at the beginning of main). +var MemProfileRate int = 512 * 1024 + +// A MemProfileRecord describes the live objects allocated +// by a particular call sequence (stack trace). +type MemProfileRecord struct { + AllocBytes, FreeBytes int64 // number of bytes allocated, freed + AllocObjects, FreeObjects int64 // number of objects allocated, freed + Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry +} + +// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes). +func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes } + +// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects). +func (r *MemProfileRecord) InUseObjects() int64 { + return r.AllocObjects - r.FreeObjects +} + +// Stack returns the stack trace associated with the record, +// a prefix of r.Stack0. +func (r *MemProfileRecord) Stack() []uintptr { + for i, v := range r.Stack0 { + if v == 0 { + return r.Stack0[0:i] + } + } + return r.Stack0[0:] +} + +// MemProfile returns n, the number of records in the current memory profile. +// If len(p) >= n, MemProfile copies the profile into p and returns n, true. +// If len(p) < n, MemProfile does not change p and returns n, false. +// +// If inuseZero is true, the profile includes allocation records +// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes. +// These are sites where memory was allocated, but it has all +// been released back to the runtime. 
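+//
+// For example, an allocation site whose objects have all been freed
+// again shows up only when inuseZero is true.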
+// +// Most clients should use the runtime/pprof package or +// the testing package's -test.memprofile flag instead +// of calling MemProfile directly. +func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) { + lock(&proflock) + clear := true + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { + n++ + } + if mp.allocs != 0 || mp.frees != 0 { + clear = false + } + } + if clear { + // Absolutely no data, suggesting that a garbage collection + // has not yet happened. In order to allow profiling when + // garbage collection is disabled from the beginning of execution, + // accumulate stats as if a GC just happened, and recount buckets. + mprof_GC() + mprof_GC() + n = 0 + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { + n++ + } + } + } + if n <= len(p) { + ok = true + idx := 0 + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + if inuseZero || mp.alloc_bytes != mp.free_bytes { + record(&p[idx], b) + idx++ + } + } + } + unlock(&proflock) + return +} + +// Write b's data to r. +func record(r *MemProfileRecord, b *bucket) { + mp := b.mp() + r.AllocBytes = int64(mp.alloc_bytes) + r.FreeBytes = int64(mp.free_bytes) + r.AllocObjects = int64(mp.allocs) + r.FreeObjects = int64(mp.frees) + copy(r.Stack0[:], b.stk()) + for i := int(b.nstk); i < len(r.Stack0); i++ { + r.Stack0[i] = 0 + } +} + +func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) { + lock(&proflock) + for b := mbuckets; b != nil; b = b.allnext { + mp := b.mp() + fn(b, uintptr(b.nstk), &b.stk()[0], b.size, mp.allocs, mp.frees) + } + unlock(&proflock) +} + +// BlockProfileRecord describes blocking events originated +// at a particular call sequence (stack trace). +type BlockProfileRecord struct { + Count int64 + Cycles int64 + StackRecord +} + +// BlockProfile returns n, the number of records in the current blocking profile. +// If len(p) >= n, BlockProfile copies the profile into p and returns n, true. +// If len(p) < n, BlockProfile does not change p and returns n, false. +// +// Most clients should use the runtime/pprof package or +// the testing package's -test.blockprofile flag instead +// of calling BlockProfile directly. +func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { + lock(&proflock) + for b := bbuckets; b != nil; b = b.allnext { + n++ + } + if n <= len(p) { + ok = true + for b := bbuckets; b != nil; b = b.allnext { + bp := b.bp() + r := &p[0] + r.Count = int64(bp.count) + r.Cycles = int64(bp.cycles) + i := copy(r.Stack0[:], b.stk()) + for ; i < len(r.Stack0); i++ { + r.Stack0[i] = 0 + } + p = p[1:] + } + } + unlock(&proflock) + return +} + +// ThreadCreateProfile returns n, the number of records in the thread creation profile. +// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true. +// If len(p) < n, ThreadCreateProfile does not change p and returns n, false. +// +// Most clients should use the runtime/pprof package instead +// of calling ThreadCreateProfile directly. 
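+//
+// A minimal call pattern (illustrative):
+//
+//	var p []runtime.StackRecord
+//	n, ok := runtime.ThreadCreateProfile(nil)
+//	for !ok {
+//		p = make([]runtime.StackRecord, n+10)
+//		n, ok = runtime.ThreadCreateProfile(p)
+//	}
+//	p = p[:n]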
+func ThreadCreateProfile(p []StackRecord) (n int, ok bool) { + first := (*m)(atomicloadp(unsafe.Pointer(&allm))) + for mp := first; mp != nil; mp = mp.alllink { + n++ + } + if n <= len(p) { + ok = true + i := 0 + for mp := first; mp != nil; mp = mp.alllink { + for s := range mp.createstack { + p[i].Stack0[s] = uintptr(mp.createstack[s]) + } + i++ + } + } + return +} + +var allgs []*g // proc.c + +// GoroutineProfile returns n, the number of records in the active goroutine stack profile. +// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true. +// If len(p) < n, GoroutineProfile does not change p and returns n, false. +// +// Most clients should use the runtime/pprof package instead +// of calling GoroutineProfile directly. +func GoroutineProfile(p []StackRecord) (n int, ok bool) { + + n = NumGoroutine() + if n <= len(p) { + gp := getg() + semacquire(&worldsema, false) + gp.m.gcing = 1 + onM(stoptheworld) + + n = NumGoroutine() + if n <= len(p) { + ok = true + r := p + sp := getcallersp(unsafe.Pointer(&p)) + pc := getcallerpc(unsafe.Pointer(&p)) + onM(func() { + saveg(pc, sp, gp, &r[0]) + }) + r = r[1:] + for _, gp1 := range allgs { + if gp1 == gp || readgstatus(gp1) == _Gdead { + continue + } + saveg(^uintptr(0), ^uintptr(0), gp1, &r[0]) + r = r[1:] + } + } + + gp.m.gcing = 0 + semrelease(&worldsema) + onM(starttheworld) + } + + return n, ok +} + +func saveg(pc, sp uintptr, gp *g, r *StackRecord) { + n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0) + if n < len(r.Stack0) { + r.Stack0[n] = 0 + } +} + +// Stack formats a stack trace of the calling goroutine into buf +// and returns the number of bytes written to buf. +// If all is true, Stack formats stack traces of all other goroutines +// into buf after the trace for the current goroutine. +func Stack(buf []byte, all bool) int { + mp := acquirem() + gp := mp.curg + if all { + semacquire(&worldsema, false) + mp.gcing = 1 + releasem(mp) + onM(stoptheworld) + if mp != acquirem() { + gothrow("Stack: rescheduled") + } + } + + n := 0 + if len(buf) > 0 { + sp := getcallersp(unsafe.Pointer(&buf)) + pc := getcallerpc(unsafe.Pointer(&buf)) + onM(func() { + g0 := getg() + g0.writebuf = buf[0:0:len(buf)] + goroutineheader(gp) + traceback(pc, sp, 0, gp) + if all { + tracebackothers(gp) + } + n = len(g0.writebuf) + g0.writebuf = nil + }) + } + + if all { + mp.gcing = 0 + semrelease(&worldsema) + onM(starttheworld) + } + releasem(mp) + return n +} + +// Tracing of alloc/free/gc. 
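+// tracealloc is invoked from mallocgc above when the allocfreetrace
+// debug setting is nonzero; tracefree and tracegc are the matching
+// hooks on the free and collection paths.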
+ +var tracelock mutex + +func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) { + lock(&tracelock) + gp := getg() + gp.m.traceback = 2 + if typ == nil { + print("tracealloc(", p, ", ", hex(size), ")\n") + } else { + print("tracealloc(", p, ", ", hex(size), ", ", *typ._string, ")\n") + } + if gp.m.curg == nil || gp == gp.m.curg { + goroutineheader(gp) + pc := getcallerpc(unsafe.Pointer(&p)) + sp := getcallersp(unsafe.Pointer(&p)) + onM(func() { + traceback(pc, sp, 0, gp) + }) + } else { + goroutineheader(gp.m.curg) + traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg) + } + print("\n") + gp.m.traceback = 0 + unlock(&tracelock) +} + +func tracefree(p unsafe.Pointer, size uintptr) { + lock(&tracelock) + gp := getg() + gp.m.traceback = 2 + print("tracefree(", p, ", ", hex(size), ")\n") + goroutineheader(gp) + pc := getcallerpc(unsafe.Pointer(&p)) + sp := getcallersp(unsafe.Pointer(&p)) + onM(func() { + traceback(pc, sp, 0, gp) + }) + print("\n") + gp.m.traceback = 0 + unlock(&tracelock) +} + +func tracegc() { + lock(&tracelock) + gp := getg() + gp.m.traceback = 2 + print("tracegc()\n") + // running on m->g0 stack; show all non-g0 goroutines + tracebackothers(gp) + print("end tracegc\n") + print("\n") + gp.m.traceback = 0 + unlock(&tracelock) +} diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go new file mode 100644 index 0000000..3456e02 --- /dev/null +++ b/libgo/go/runtime/netpoll.go @@ -0,0 +1,455 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows + +package runtime + +import "unsafe" + +// Integrated network poller (platform-independent part). +// A particular implementation (epoll/kqueue) must define the following functions: +// func netpollinit() // to initialize the poller +// func netpollopen(fd uintptr, pd *pollDesc) int32 // to arm edge-triggered notifications +// and associate fd with pd. +// An implementation must call the following function to denote that the pd is ready. +// func netpollready(gpp **g, pd *pollDesc, mode int32) + +// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer +// goroutines respectively. The semaphore can be in the following states: +// pdReady - io readiness notification is pending; +// a goroutine consumes the notification by changing the state to nil. +// pdWait - a goroutine prepares to park on the semaphore, but not yet parked; +// the goroutine commits to park by changing the state to G pointer, +// or, alternatively, concurrent io notification changes the state to READY, +// or, alternatively, concurrent timeout/close changes the state to nil. +// G pointer - the goroutine is blocked on the semaphore; +// io notification or timeout/close changes the state to READY or nil respectively +// and unparks the goroutine. +// nil - nothing of the above. +const ( + pdReady uintptr = 1 + pdWait uintptr = 2 +) + +const pollBlockSize = 4 * 1024 + +// Network poller descriptor. +type pollDesc struct { + link *pollDesc // in pollcache, protected by pollcache.lock + + // The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations. + // This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime. + // pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification) + // proceed w/o taking the lock. 
So closing, rg, rd, wg and wd are manipulated
+	// in a lock-free way by all operations.
+	// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
+	// which will blow up when GC starts moving objects.
+	lock    mutex // protects the following fields
+	fd      uintptr
+	closing bool
+	seq     uintptr // protects from stale timers and ready notifications
+	rg      uintptr // pdReady, pdWait, G waiting for read or nil
+	rt      timer   // read deadline timer (set if rt.f != nil)
+	rd      int64   // read deadline
+	wg      uintptr // pdReady, pdWait, G waiting for write or nil
+	wt      timer   // write deadline timer
+	wd      int64   // write deadline
+	user    unsafe.Pointer // user settable cookie
+}
+
+type pollCache struct {
+	lock  mutex
+	first *pollDesc
+	// PollDesc objects must be type-stable,
+	// because we can get ready notification from epoll/kqueue
+	// after the descriptor is closed/reused.
+	// Stale notifications are detected using the seq variable;
+	// seq is incremented when deadlines are changed or the descriptor is reused.
+}
+
+var pollcache pollCache
+
+func netpollServerInit() {
+	onM(netpollinit)
+}
+
+func netpollOpen(fd uintptr) (*pollDesc, int) {
+	pd := pollcache.alloc()
+	lock(&pd.lock)
+	if pd.wg != 0 && pd.wg != pdReady {
+		gothrow("netpollOpen: blocked write on free descriptor")
+	}
+	if pd.rg != 0 && pd.rg != pdReady {
+		gothrow("netpollOpen: blocked read on free descriptor")
+	}
+	pd.fd = fd
+	pd.closing = false
+	pd.seq++
+	pd.rg = 0
+	pd.rd = 0
+	pd.wg = 0
+	pd.wd = 0
+	unlock(&pd.lock)
+
+	var errno int32
+	onM(func() {
+		errno = netpollopen(fd, pd)
+	})
+	return pd, int(errno)
+}
+
+func netpollClose(pd *pollDesc) {
+	if !pd.closing {
+		gothrow("netpollClose: close w/o unblock")
+	}
+	if pd.wg != 0 && pd.wg != pdReady {
+		gothrow("netpollClose: blocked write on closing descriptor")
+	}
+	if pd.rg != 0 && pd.rg != pdReady {
+		gothrow("netpollClose: blocked read on closing descriptor")
+	}
+	onM(func() {
+		netpollclose(uintptr(pd.fd))
+	})
+	pollcache.free(pd)
+}
+
+func (c *pollCache) free(pd *pollDesc) {
+	lock(&c.lock)
+	pd.link = c.first
+	c.first = pd
+	unlock(&c.lock)
+}
+
+func netpollReset(pd *pollDesc, mode int) int {
+	err := netpollcheckerr(pd, int32(mode))
+	if err != 0 {
+		return err
+	}
+	if mode == 'r' {
+		pd.rg = 0
+	} else if mode == 'w' {
+		pd.wg = 0
+	}
+	return 0
+}
+
+func netpollWait(pd *pollDesc, mode int) int {
+	err := netpollcheckerr(pd, int32(mode))
+	if err != 0 {
+		return err
+	}
+	// For now, only Solaris uses level-triggered IO.
+	if GOOS == "solaris" {
+		onM(func() {
+			netpollarm(pd, mode)
+		})
+	}
+	for !netpollblock(pd, int32(mode), false) {
+		err = netpollcheckerr(pd, int32(mode))
+		if err != 0 {
+			return err
+		}
+		// Can happen if the timeout fired and unblocked us,
+		// but before we had a chance to run, the timeout was reset.
+		// Pretend it has not happened and retry.
+	}
+	return 0
+}
+
+func netpollWaitCanceled(pd *pollDesc, mode int) {
+	// This function is used only on Windows after a failed attempt to cancel
+	// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
+	for !netpollblock(pd, int32(mode), true) {
+	}
+}
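The rg/wg fields implement the three-state semaphore described in the pollDesc comment above. A deliberately simplified model of that protocol outside the runtime (a toy sketch: it collapses the G-pointer state into a buffered channel, assumes a single waiter, assumes Go 1.19+ for atomic.Uintptr, and uses sequentially consistent sync/atomic operations in place of casuintptr/xchguintptr):

package eventsketch

import "sync/atomic"

const (
	evReady uintptr = 1 // like pdReady
	evWait  uintptr = 2 // like pdWait
)

// event models a single pollDesc.rg or pollDesc.wg slot:
// one waiting goroutine, one readiness notifier.
type event struct {
	state atomic.Uintptr
	wake  chan struct{} // stands in for the parked goroutine (the G pointer state)
}

func newEvent() *event { return &event{wake: make(chan struct{}, 1)} }

// wait mirrors netpollblock: consume a pending notification,
// or advertise itself as a waiter and park. A second concurrent
// wait would spin here; the real code throws "double wait".
func (e *event) wait() {
	for {
		if e.state.CompareAndSwap(evReady, 0) {
			return // notification was already pending
		}
		if e.state.CompareAndSwap(0, evWait) {
			<-e.wake // "gopark" until notify observes evWait
			e.state.Store(0)
			return
		}
	}
}

// notify mirrors netpollready/netpollunblock: mark the event ready
// and wake the waiter if one had advertised itself.
func (e *event) notify() {
	if e.state.Swap(evReady) == evWait {
		e.wake <- struct{}{} // "goready"
	}
}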
+func netpollSetDeadline(pd *pollDesc, d int64, mode int) {
+	lock(&pd.lock)
+	if pd.closing {
+		unlock(&pd.lock)
+		return
+	}
+	pd.seq++ // invalidate current timers
+	// Reset current timers.
+	if pd.rt.f != nil {
+		deltimer(&pd.rt)
+		pd.rt.f = nil
+	}
+	if pd.wt.f != nil {
+		deltimer(&pd.wt)
+		pd.wt.f = nil
+	}
+	// Set up new timers.
+	if d != 0 && d <= nanotime() {
+		d = -1
+	}
+	if mode == 'r' || mode == 'r'+'w' {
+		pd.rd = d
+	}
+	if mode == 'w' || mode == 'r'+'w' {
+		pd.wd = d
+	}
+	if pd.rd > 0 && pd.rd == pd.wd {
+		pd.rt.f = netpollDeadline
+		pd.rt.when = pd.rd
+		// Copy current seq into the timer arg.
+		// Timer func will check the seq against current descriptor seq,
+		// if they differ the descriptor was reused or timers were reset.
+		pd.rt.arg = pd
+		pd.rt.seq = pd.seq
+		addtimer(&pd.rt)
+	} else {
+		if pd.rd > 0 {
+			pd.rt.f = netpollReadDeadline
+			pd.rt.when = pd.rd
+			pd.rt.arg = pd
+			pd.rt.seq = pd.seq
+			addtimer(&pd.rt)
+		}
+		if pd.wd > 0 {
+			pd.wt.f = netpollWriteDeadline
+			pd.wt.when = pd.wd
+			pd.wt.arg = pd
+			pd.wt.seq = pd.seq
+			addtimer(&pd.wt)
+		}
+	}
+	// If we set the new deadline in the past, unblock currently pending IO if any.
+	var rg, wg *g
+	atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
+	if pd.rd < 0 {
+		rg = netpollunblock(pd, 'r', false)
+	}
+	if pd.wd < 0 {
+		wg = netpollunblock(pd, 'w', false)
+	}
+	unlock(&pd.lock)
+	if rg != nil {
+		goready(rg)
+	}
+	if wg != nil {
+		goready(wg)
+	}
+}
+
+func netpollUnblock(pd *pollDesc) {
+	lock(&pd.lock)
+	if pd.closing {
+		gothrow("netpollUnblock: already closing")
+	}
+	pd.closing = true
+	pd.seq++
+	var rg, wg *g
+	atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
+	rg = netpollunblock(pd, 'r', false)
+	wg = netpollunblock(pd, 'w', false)
+	if pd.rt.f != nil {
+		deltimer(&pd.rt)
+		pd.rt.f = nil
+	}
+	if pd.wt.f != nil {
+		deltimer(&pd.wt)
+		pd.wt.f = nil
+	}
+	unlock(&pd.lock)
+	if rg != nil {
+		goready(rg)
+	}
+	if wg != nil {
+		goready(wg)
+	}
+}
+
+func netpollfd(pd *pollDesc) uintptr {
+	return pd.fd
+}
+
+func netpolluser(pd *pollDesc) *unsafe.Pointer {
+	return &pd.user
+}
+
+func netpollclosing(pd *pollDesc) bool {
+	return pd.closing
+}
+
+func netpolllock(pd *pollDesc) {
+	lock(&pd.lock)
+}
+
+func netpollunlock(pd *pollDesc) {
+	unlock(&pd.lock)
+}
+
+// make pd ready; newly runnable goroutines (if any) are queued on the list headed at gpp
+func netpollready(gpp **g, pd *pollDesc, mode int32) {
+	var rg, wg *g
+	if mode == 'r' || mode == 'r'+'w' {
+		rg = netpollunblock(pd, 'r', true)
+	}
+	if mode == 'w' || mode == 'r'+'w' {
+		wg = netpollunblock(pd, 'w', true)
+	}
+	if rg != nil {
+		rg.schedlink = *gpp
+		*gpp = rg
+	}
+	if wg != nil {
+		wg.schedlink = *gpp
+		*gpp = wg
+	}
+}
+
+func netpollcheckerr(pd *pollDesc, mode int32) int {
+	if pd.closing {
+		return 1 // errClosing
+	}
+	if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) {
+		return 2 // errTimeout
+	}
+	return 0
+}
+
+func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
+	return casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
+}
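netpollblock, which follows, parks only after re-checking the error state it may have raced with; netpollUnblock and netpollSetDeadline above do the mirror image (store the error state, full barrier via atomicstorep, then load rg/wg). The shape of that handshake, reduced to two flags (a toy sketch: the names waiter and closer are invented, and sequentially consistent sync/atomic operations, Go 1.19+ for atomic.Bool, stand in for the runtime's explicit barriers):

package handshake

import "sync/atomic"

var (
	closing atomic.Bool // like pd.closing (plus rd/wd)
	waiting atomic.Bool // like rg/wg holding pdWait or a G
)

// waiter is the netpollblock side: publish the intent to park,
// then re-check the error state before actually parking.
func waiter() bool {
	waiting.Store(true) // like CASing gpp from 0 to pdWait
	if closing.Load() { // like netpollcheckerr after the CAS
		waiting.Store(false)
		return false // descriptor is going away; do not park
	}
	// Safe to park here: the closer is now obliged to see
	// waiting == true and wake us.
	return true
}

// closer is the netpollUnblock side: publish the error state,
// then look for a waiter to wake.
func closer() {
	closing.Store(true) // like pd.closing = true plus the barrier
	if waiting.Load() { // like netpollunblock reading rg/wg
		// wake the parked waiter here
	}
}

With both sides ordered this way, at least one of them must observe the other, so a waiter can never park after the descriptor has been closed without someone being responsible for waking it.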
+// returns true if IO is ready, or false if timed out or closed
+// waitio - wait only for completed IO, ignore errors
+func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
+	gpp := &pd.rg
+	if mode == 'w' {
+		gpp = &pd.wg
+	}
+
+	// set the gpp semaphore to WAIT
+	for {
+		old := *gpp
+		if old == pdReady {
+			*gpp = 0
+			return true
+		}
+		if old != 0 {
+			gothrow("netpollblock: double wait")
+		}
+		if casuintptr(gpp, 0, pdWait) {
+			break
+		}
+	}
+
+	// need to recheck error states after setting gpp to WAIT
+	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
+	// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
+	if waitio || netpollcheckerr(pd, mode) == 0 {
+		f := netpollblockcommit
+		gopark(**(**unsafe.Pointer)(unsafe.Pointer(&f)), unsafe.Pointer(gpp), "IO wait")
+	}
+	// be careful to not lose concurrent READY notification
+	old := xchguintptr(gpp, 0)
+	if old > pdWait {
+		gothrow("netpollblock: corrupted state")
+	}
+	return old == pdReady
+}
+
+func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
+	gpp := &pd.rg
+	if mode == 'w' {
+		gpp = &pd.wg
+	}
+
+	for {
+		old := *gpp
+		if old == pdReady {
+			return nil
+		}
+		if old == 0 && !ioready {
+			// Only set READY for ioready. runtime_pollWait
+			// will check for timeout/cancel before waiting.
+			return nil
+		}
+		var new uintptr
+		if ioready {
+			new = pdReady
+		}
+		if casuintptr(gpp, old, new) {
+			if old == pdReady || old == pdWait {
+				old = 0
+			}
+			return (*g)(unsafe.Pointer(old))
+		}
+	}
+}
+
+func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
+	lock(&pd.lock)
+	// Seq arg is seq when the timer was set.
+	// If it's stale, ignore the timer event.
+	if seq != pd.seq {
+		// The descriptor was reused or timers were reset.
+		unlock(&pd.lock)
+		return
+	}
+	var rg *g
+	if read {
+		if pd.rd <= 0 || pd.rt.f == nil {
+			gothrow("netpolldeadlineimpl: inconsistent read deadline")
+		}
+		pd.rd = -1
+		atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
+		rg = netpollunblock(pd, 'r', false)
+	}
+	var wg *g
+	if write {
+		if pd.wd <= 0 || pd.wt.f == nil && !read {
+			gothrow("netpolldeadlineimpl: inconsistent write deadline")
+		}
+		pd.wd = -1
+		atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
+		wg = netpollunblock(pd, 'w', false)
+	}
+	unlock(&pd.lock)
+	if rg != nil {
+		goready(rg)
+	}
+	if wg != nil {
+		goready(wg)
+	}
+}
+
+func netpollDeadline(arg interface{}, seq uintptr) {
+	netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
+}
+
+func netpollReadDeadline(arg interface{}, seq uintptr) {
+	netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
+}
+
+func netpollWriteDeadline(arg interface{}, seq uintptr) {
+	netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
+}
+
+func (c *pollCache) alloc() *pollDesc {
+	lock(&c.lock)
+	if c.first == nil {
+		const pdSize = unsafe.Sizeof(pollDesc{})
+		n := pollBlockSize / pdSize
+		if n == 0 {
+			n = 1
+		}
+		// Must be in non-GC memory because it can be referenced
+		// only from epoll/kqueue internals.
+		mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
+		for i := uintptr(0); i < n; i++ {
+			pd := (*pollDesc)(add(mem, i*pdSize))
+			pd.link = c.first
+			c.first = pd
+		}
+	}
+	pd := c.first
+	c.first = pd.link
+	unlock(&c.lock)
+	return pd
+}
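The Linux file introduced below implements netpollinit, netpollopen and netpoll on top of epoll. A rough user-space analogue of the same edge-triggered pattern (a sketch only: it assumes the golang.org/x/sys/unix package rather than the runtime's raw syscall stubs, is Linux-only, and the helper name pollLoop is invented for illustration):

package main

import "golang.org/x/sys/unix"

// pollLoop registers fds edge-triggered, mirroring netpollopen's
// EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLET, then dispatches readiness
// the way netpoll does ('r' and/or 'w' per event).
func pollLoop(fds []int, ready func(fd int, mode byte)) error {
	epfd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
	if err != nil {
		return err
	}
	defer unix.Close(epfd)
	for _, fd := range fds {
		ev := unix.EpollEvent{
			Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET,
			Fd:     int32(fd),
		}
		if err := unix.EpollCtl(epfd, unix.EPOLL_CTL_ADD, fd, &ev); err != nil {
			return err
		}
	}
	events := make([]unix.EpollEvent, 128)
	for {
		n, err := unix.EpollWait(epfd, events, -1)
		if err == unix.EINTR {
			continue // retried, exactly as netpoll's goto retry does
		}
		if err != nil {
			return err
		}
		for _, ev := range events[:n] {
			if ev.Events&(unix.EPOLLIN|unix.EPOLLRDHUP|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
				ready(int(ev.Fd), 'r')
			}
			if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
				ready(int(ev.Fd), 'w')
			}
		}
	}
}

diff --git a/libgo/go/runtime/netpoll_epoll.go b/libgo/go/runtime/netpoll_epoll.go
new file mode 100644
index 0000000..ecfc9cd
--- /dev/null
+++ b/libgo/go/runtime/netpoll_epoll.go
@@ -0,0 +1,97 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.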
+ +// +build linux + +package runtime + +import "unsafe" + +func epollcreate(size int32) int32 +func epollcreate1(flags int32) int32 + +//go:noescape +func epollctl(epfd, op, fd int32, ev *epollevent) int32 + +//go:noescape +func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32 +func closeonexec(fd int32) + +var ( + epfd int32 = -1 // epoll descriptor + netpolllasterr int32 +) + +func netpollinit() { + epfd = epollcreate1(_EPOLL_CLOEXEC) + if epfd >= 0 { + return + } + epfd = epollcreate(1024) + if epfd >= 0 { + closeonexec(epfd) + return + } + println("netpollinit: failed to create epoll descriptor", -epfd) + gothrow("netpollinit: failed to create descriptor") +} + +func netpollopen(fd uintptr, pd *pollDesc) int32 { + var ev epollevent + ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLET + *(**pollDesc)(unsafe.Pointer(&ev.data)) = pd + return -epollctl(epfd, _EPOLL_CTL_ADD, int32(fd), &ev) +} + +func netpollclose(fd uintptr) int32 { + var ev epollevent + return -epollctl(epfd, _EPOLL_CTL_DEL, int32(fd), &ev) +} + +func netpollarm(pd *pollDesc, mode int) { + gothrow("unused") +} + +// polls for ready network connections +// returns list of goroutines that become runnable +func netpoll(block bool) (gp *g) { + if epfd == -1 { + return + } + waitms := int32(-1) + if !block { + waitms = 0 + } + var events [128]epollevent +retry: + n := epollwait(epfd, &events[0], int32(len(events)), waitms) + if n < 0 { + if n != -_EINTR && n != netpolllasterr { + netpolllasterr = n + println("runtime: epollwait on fd", epfd, "failed with", -n) + } + goto retry + } + for i := int32(0); i < n; i++ { + ev := &events[i] + if ev.events == 0 { + continue + } + var mode int32 + if ev.events&(_EPOLLIN|_EPOLLRDHUP|_EPOLLHUP|_EPOLLERR) != 0 { + mode += 'r' + } + if ev.events&(_EPOLLOUT|_EPOLLHUP|_EPOLLERR) != 0 { + mode += 'w' + } + if mode != 0 { + pd := *(**pollDesc)(unsafe.Pointer(&ev.data)) + netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode) + } + } + if block && gp == nil { + goto retry + } + return gp +} diff --git a/libgo/go/runtime/netpoll_kqueue.go b/libgo/go/runtime/netpoll_kqueue.go new file mode 100644 index 0000000..d6d55b9 --- /dev/null +++ b/libgo/go/runtime/netpoll_kqueue.go @@ -0,0 +1,101 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package runtime + +// Integrated network poller (kqueue-based implementation). + +import "unsafe" + +func kqueue() int32 + +//go:noescape +func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32 +func closeonexec(fd int32) + +var ( + kq int32 = -1 + netpolllasterr int32 +) + +func netpollinit() { + kq = kqueue() + if kq < 0 { + println("netpollinit: kqueue failed with", -kq) + gothrow("netpollinit: kqueue failed") + } + closeonexec(kq) +} + +func netpollopen(fd uintptr, pd *pollDesc) int32 { + // Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR) + // for the whole fd lifetime. The notifications are automatically unregistered + // when fd is closed. 
+ var ev [2]keventt + *(*uintptr)(unsafe.Pointer(&ev[0].ident)) = fd + ev[0].filter = _EVFILT_READ + ev[0].flags = _EV_ADD | _EV_CLEAR + ev[0].fflags = 0 + ev[0].data = 0 + ev[0].udata = (*byte)(unsafe.Pointer(pd)) + ev[1] = ev[0] + ev[1].filter = _EVFILT_WRITE + n := kevent(kq, &ev[0], 2, nil, 0, nil) + if n < 0 { + return -n + } + return 0 +} + +func netpollclose(fd uintptr) int32 { + // Don't need to unregister because calling close() + // on fd will remove any kevents that reference the descriptor. + return 0 +} + +func netpollarm(pd *pollDesc, mode int) { + gothrow("unused") +} + +// Polls for ready network connections. +// Returns list of goroutines that become runnable. +func netpoll(block bool) (gp *g) { + if kq == -1 { + return + } + var tp *timespec + var ts timespec + if !block { + tp = &ts + } + var events [64]keventt +retry: + n := kevent(kq, nil, 0, &events[0], int32(len(events)), tp) + if n < 0 { + if n != -_EINTR && n != netpolllasterr { + netpolllasterr = n + println("runtime: kevent on fd", kq, "failed with", -n) + } + goto retry + } + for i := 0; i < int(n); i++ { + ev := &events[i] + var mode int32 + if ev.filter == _EVFILT_READ { + mode += 'r' + } + if ev.filter == _EVFILT_WRITE { + mode += 'w' + } + if mode != 0 { + netpollready((**g)(noescape(unsafe.Pointer(&gp))), (*pollDesc)(unsafe.Pointer(ev.udata)), mode) + } + } + if block && gp == nil { + goto retry + } + return gp +} diff --git a/libgo/go/runtime/netpoll_nacl.go b/libgo/go/runtime/netpoll_nacl.go new file mode 100644 index 0000000..5cbc300 --- /dev/null +++ b/libgo/go/runtime/netpoll_nacl.go @@ -0,0 +1,26 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fake network poller for NaCl. +// Should never be used, because NaCl network connections do not honor "SetNonblock". + +package runtime + +func netpollinit() { +} + +func netpollopen(fd uintptr, pd *pollDesc) int32 { + return 0 +} + +func netpollclose(fd uintptr) int32 { + return 0 +} + +func netpollarm(pd *pollDesc, mode int) { +} + +func netpoll(block bool) *g { + return nil +} diff --git a/libgo/go/runtime/noasm_arm.go b/libgo/go/runtime/noasm_arm.go new file mode 100644 index 0000000..dd3ef82 --- /dev/null +++ b/libgo/go/runtime/noasm_arm.go @@ -0,0 +1,54 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Routines that are implemented in assembly in asm_{amd64,386}.s +// but are implemented in Go for arm. + +package runtime + +func cmpstring(s1, s2 string) int { + l := len(s1) + if len(s2) < l { + l = len(s2) + } + for i := 0; i < l; i++ { + c1, c2 := s1[i], s2[i] + if c1 < c2 { + return -1 + } + if c1 > c2 { + return +1 + } + } + if len(s1) < len(s2) { + return -1 + } + if len(s1) > len(s2) { + return +1 + } + return 0 +} + +func cmpbytes(s1, s2 []byte) int { + l := len(s1) + if len(s2) < l { + l = len(s2) + } + for i := 0; i < l; i++ { + c1, c2 := s1[i], s2[i] + if c1 < c2 { + return -1 + } + if c1 > c2 { + return +1 + } + } + if len(s1) < len(s2) { + return -1 + } + if len(s1) > len(s2) { + return +1 + } + return 0 +} diff --git a/libgo/go/runtime/os_darwin.go b/libgo/go/runtime/os_darwin.go new file mode 100644 index 0000000..4327ced --- /dev/null +++ b/libgo/go/runtime/os_darwin.go @@ -0,0 +1,24 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func bsdthread_create(stk, mm, gg, fn unsafe.Pointer) int32 +func bsdthread_register() int32 +func mach_msg_trap(h unsafe.Pointer, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32 +func mach_reply_port() uint32 +func mach_task_self() uint32 +func mach_thread_self() uint32 +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func sigprocmask(sig int32, new, old unsafe.Pointer) +func sigaction(mode uint32, new, old unsafe.Pointer) +func sigaltstack(new, old unsafe.Pointer) +func sigtramp() +func setitimer(mode int32, new, old unsafe.Pointer) +func mach_semaphore_wait(sema uint32) int32 +func mach_semaphore_timedwait(sema, sec, nsec uint32) int32 +func mach_semaphore_signal(sema uint32) int32 +func mach_semaphore_signal_all(sema uint32) int32 diff --git a/libgo/go/runtime/os_dragonfly.go b/libgo/go/runtime/os_dragonfly.go new file mode 100644 index 0000000..cdaa069 --- /dev/null +++ b/libgo/go/runtime/os_dragonfly.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func lwp_create(param unsafe.Pointer) int32 +func sigaltstack(new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigprocmask(new, old unsafe.Pointer) +func setitimer(mode int32, new, old unsafe.Pointer) +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func getrlimit(kind int32, limit unsafe.Pointer) int32 +func raise(sig int32) +func sys_umtx_sleep(addr unsafe.Pointer, val, timeout int32) int32 +func sys_umtx_wakeup(addr unsafe.Pointer, val int32) int32 + +const stackSystem = 0 diff --git a/libgo/go/runtime/os_freebsd.go b/libgo/go/runtime/os_freebsd.go new file mode 100644 index 0000000..5970804 --- /dev/null +++ b/libgo/go/runtime/os_freebsd.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func thr_new(param unsafe.Pointer, size int32) +func sigaltstack(new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigprocmask(new, old unsafe.Pointer) +func setitimer(mode int32, new, old unsafe.Pointer) +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func getrlimit(kind int32, limit unsafe.Pointer) int32 +func raise(sig int32) +func sys_umtx_op(addr unsafe.Pointer, mode int32, val uint32, ptr2, ts unsafe.Pointer) int32 diff --git a/libgo/go/runtime/os_linux.go b/libgo/go/runtime/os_linux.go new file mode 100644 index 0000000..41123ad --- /dev/null +++ b/libgo/go/runtime/os_linux.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +import "unsafe" + +func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32 +func clone(flags int32, stk, mm, gg, fn unsafe.Pointer) int32 +func rt_sigaction(sig uintptr, new, old unsafe.Pointer, size uintptr) int32 +func sigaltstack(new, old unsafe.Pointer) +func setitimer(mode int32, new, old unsafe.Pointer) +func rtsigprocmask(sig int32, new, old unsafe.Pointer, size int32) +func getrlimit(kind int32, limit unsafe.Pointer) int32 +func raise(sig int32) +func sched_getaffinity(pid, len uintptr, buf *uintptr) int32 diff --git a/libgo/go/runtime/os_nacl.go b/libgo/go/runtime/os_nacl.go new file mode 100644 index 0000000..8dd43ff --- /dev/null +++ b/libgo/go/runtime/os_nacl.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func nacl_exception_stack(p unsafe.Pointer, size int32) int32 +func nacl_exception_handler(fn, arg unsafe.Pointer) int32 +func nacl_sem_create(flag int32) int32 +func nacl_sem_wait(sem int32) int32 +func nacl_sem_post(sem int32) int32 +func nacl_mutex_create(flag int32) int32 +func nacl_mutex_lock(mutex int32) int32 +func nacl_mutex_trylock(mutex int32) int32 +func nacl_mutex_unlock(mutex int32) int32 +func nacl_cond_create(flag int32) int32 +func nacl_cond_wait(cond, n int32) int32 +func nacl_cond_signal(cond int32) int32 +func nacl_cond_broadcast(cond int32) int32 +func nacl_cond_timed_wait_abs(cond, lock int32, ts unsafe.Pointer) int32 +func nacl_thread_create(fn, stk, tls, xx unsafe.Pointer) int32 +func nacl_nanosleep(ts, extra unsafe.Pointer) int32 + +func os_sigpipe() { + gothrow("too many writes on closed pipe") +} + +func sigpanic() { + g := getg() + if !canpanic(g) { + gothrow("unexpected signal during runtime execution") + } + + // Native Client only invokes the exception handler for memory faults. + g.sig = _SIGSEGV + panicmem() +} diff --git a/libgo/go/runtime/os_netbsd.go b/libgo/go/runtime/os_netbsd.go new file mode 100644 index 0000000..f000c5e --- /dev/null +++ b/libgo/go/runtime/os_netbsd.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func setitimer(mode int32, new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigaltstack(new, old unsafe.Pointer) +func sigprocmask(mode int32, new, old unsafe.Pointer) +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func lwp_tramp() +func raise(sig int32) +func getcontext(ctxt unsafe.Pointer) +func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32 +func lwp_park(abstime unsafe.Pointer, unpark int32, hint, unparkhint unsafe.Pointer) int32 +func lwp_unpark(lwp int32, hint unsafe.Pointer) int32 +func lwp_self() int32 diff --git a/libgo/go/runtime/os_openbsd.go b/libgo/go/runtime/os_openbsd.go new file mode 100644 index 0000000..a000f96 --- /dev/null +++ b/libgo/go/runtime/os_openbsd.go @@ -0,0 +1,17 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +import "unsafe" + +func setitimer(mode int32, new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigaltstack(new, old unsafe.Pointer) +func sigprocmask(mode int32, new uint32) uint32 +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func raise(sig int32) +func tfork(param unsafe.Pointer, psize uintptr, mm, gg, fn unsafe.Pointer) int32 +func thrsleep(ident unsafe.Pointer, clock_id int32, tsp, lock, abort unsafe.Pointer) int32 +func thrwakeup(ident unsafe.Pointer, n int32) int32 diff --git a/libgo/go/runtime/os_plan9.go b/libgo/go/runtime/os_plan9.go new file mode 100644 index 0000000..20e47bf --- /dev/null +++ b/libgo/go/runtime/os_plan9.go @@ -0,0 +1,103 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32 +func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32 +func seek(fd int32, offset int64, whence int32) int64 +func exits(msg *byte) +func brk_(addr unsafe.Pointer) uintptr +func sleep(ms int32) int32 +func rfork(flags int32) int32 +func plan9_semacquire(addr *uint32, block int32) int32 +func plan9_tsemacquire(addr *uint32, ms int32) int32 +func plan9_semrelease(addr *uint32, count int32) int32 +func notify(fn unsafe.Pointer) int32 +func noted(mode int32) int32 +func nsec(*int64) int64 +func sigtramp(ureg, msg unsafe.Pointer) +func setfpmasks() +func tstart_plan9(newm *m) +func errstr() string + +type _Plink uintptr + +func os_sigpipe() { + gothrow("too many writes on closed pipe") +} + +func sigpanic() { + g := getg() + if !canpanic(g) { + gothrow("unexpected signal during runtime execution") + } + + note := gostringnocopy((*byte)(unsafe.Pointer(g.m.notesig))) + switch g.sig { + case _SIGRFAULT, _SIGWFAULT: + addr := note[index(note, "addr=")+5:] + g.sigcode1 = uintptr(atolwhex(addr)) + if g.sigcode1 < 0x1000 || g.paniconfault { + panicmem() + } + print("unexpected fault address ", hex(g.sigcode1), "\n") + gothrow("fault") + case _SIGTRAP: + if g.paniconfault { + panicmem() + } + gothrow(note) + case _SIGINTDIV: + panicdivide() + case _SIGFLOAT: + panicfloat() + default: + panic(errorString(note)) + } +} + +func atolwhex(p string) int64 { + for hasprefix(p, " ") || hasprefix(p, "\t") { + p = p[1:] + } + neg := false + if hasprefix(p, "-") || hasprefix(p, "+") { + neg = p[0] == '-' + p = p[1:] + for hasprefix(p, " ") || hasprefix(p, "\t") { + p = p[1:] + } + } + var n int64 + switch { + case hasprefix(p, "0x"), hasprefix(p, "0X"): + p = p[2:] + for ; len(p) > 0; p = p[1:] { + if '0' <= p[0] && p[0] <= '9' { + n = n*16 + int64(p[0]-'0') + } else if 'a' <= p[0] && p[0] <= 'f' { + n = n*16 + int64(p[0]-'a'+10) + } else if 'A' <= p[0] && p[0] <= 'F' { + n = n*16 + int64(p[0]-'A'+10) + } else { + break + } + } + case hasprefix(p, "0"): + for ; len(p) > 0 && '0' <= p[0] && p[0] <= '7'; p = p[1:] { + n = n*8 + int64(p[0]-'0') + } + default: + for ; len(p) > 0 && '0' <= p[0] && p[0] <= '9'; p = p[1:] { + n = n*10 + int64(p[0]-'0') + } + } + if neg { + n = -n + } + return n +} diff --git a/libgo/go/runtime/os_solaris.go b/libgo/go/runtime/os_solaris.go new file mode 100644 index 0000000..ca13151 --- /dev/null +++ b/libgo/go/runtime/os_solaris.go @@ -0,0 +1,100 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func setitimer(mode int32, new, old unsafe.Pointer) +func sigaction(sig int32, new, old unsafe.Pointer) +func sigaltstack(new, old unsafe.Pointer) +func sigprocmask(mode int32, new, old unsafe.Pointer) +func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32 +func getrlimit(kind int32, limit unsafe.Pointer) +func miniterrno(fn unsafe.Pointer) +func raise(sig int32) +func getcontext(ctxt unsafe.Pointer) +func tstart_sysvicall(mm unsafe.Pointer) uint32 +func nanotime1() int64 +func usleep1(usec uint32) +func osyield1() +func netpollinit() +func netpollopen(fd uintptr, pd *pollDesc) int32 +func netpollclose(fd uintptr) int32 +func netpollarm(pd *pollDesc, mode int) + +type libcFunc byte + +var asmsysvicall6 libcFunc + +//go:nosplit +func sysvicall0(fn *libcFunc) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 0 + // TODO(rsc): Why is noescape necessary here and below? + libcall.args = uintptr(noescape(unsafe.Pointer(&fn))) // it's unused but must be non-nil, otherwise crashes + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall1(fn *libcFunc, a1 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 1 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 2 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 3 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 4 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 5 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} + +//go:nosplit +func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr { + libcall := &getg().m.libcall + libcall.fn = uintptr(unsafe.Pointer(fn)) + libcall.n = 6 + libcall.args = uintptr(noescape(unsafe.Pointer(&a1))) + asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall)) + return libcall.r1 +} diff --git a/libgo/go/runtime/os_windows.go b/libgo/go/runtime/os_windows.go new file mode 100644 index 0000000..1528d2f --- /dev/null +++ b/libgo/go/runtime/os_windows.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +type stdFunction *byte + +func stdcall0(fn stdFunction) uintptr +func stdcall1(fn stdFunction, a0 uintptr) uintptr +func stdcall2(fn stdFunction, a0, a1 uintptr) uintptr +func stdcall3(fn stdFunction, a0, a1, a2 uintptr) uintptr +func stdcall4(fn stdFunction, a0, a1, a2, a3 uintptr) uintptr +func stdcall5(fn stdFunction, a0, a1, a2, a3, a4 uintptr) uintptr +func stdcall6(fn stdFunction, a0, a1, a2, a3, a4, a5 uintptr) uintptr +func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr + +func asmstdcall(fn unsafe.Pointer) +func getlasterror() uint32 +func setlasterror(err uint32) +func usleep1(usec uint32) +func netpollinit() +func netpollopen(fd uintptr, pd *pollDesc) int32 +func netpollclose(fd uintptr) int32 +func netpollarm(pd *pollDesc, mode int) + +func os_sigpipe() { + gothrow("too many writes on closed pipe") +} + +func sigpanic() { + g := getg() + if !canpanic(g) { + gothrow("unexpected signal during runtime execution") + } + + switch uint32(g.sig) { + case _EXCEPTION_ACCESS_VIOLATION: + if g.sigcode1 < 0x1000 || g.paniconfault { + panicmem() + } + print("unexpected fault address ", hex(g.sigcode1), "\n") + gothrow("fault") + case _EXCEPTION_INT_DIVIDE_BY_ZERO: + panicdivide() + case _EXCEPTION_INT_OVERFLOW: + panicoverflow() + case _EXCEPTION_FLT_DENORMAL_OPERAND, + _EXCEPTION_FLT_DIVIDE_BY_ZERO, + _EXCEPTION_FLT_INEXACT_RESULT, + _EXCEPTION_FLT_OVERFLOW, + _EXCEPTION_FLT_UNDERFLOW: + panicfloat() + } + gothrow("fault") +} diff --git a/libgo/go/runtime/os_windows_386.go b/libgo/go/runtime/os_windows_386.go new file mode 100644 index 0000000..86a1906 --- /dev/null +++ b/libgo/go/runtime/os_windows_386.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// contextPC returns the EIP (program counter) register from the context. +func contextPC(r *context) uintptr { return uintptr(r.eip) } + +// contextSP returns the ESP (stack pointer) register from the context. +func contextSP(r *context) uintptr { return uintptr(r.esp) } diff --git a/libgo/go/runtime/os_windows_amd64.go b/libgo/go/runtime/os_windows_amd64.go new file mode 100644 index 0000000..3f4d4d0 --- /dev/null +++ b/libgo/go/runtime/os_windows_amd64.go @@ -0,0 +1,11 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// contextPC returns the RIP (program counter) register from the context. +func contextPC(r *context) uintptr { return uintptr(r.rip) } + +// contextSP returns the RSP (stack pointer) register from the context. +func contextSP(r *context) uintptr { return uintptr(r.rsp) } diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go new file mode 100644 index 0000000..685ff5c --- /dev/null +++ b/libgo/go/runtime/panic.go @@ -0,0 +1,505 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +import "unsafe" + +var indexError = error(errorString("index out of range")) + +func panicindex() { + panic(indexError) +} + +var sliceError = error(errorString("slice bounds out of range")) + +func panicslice() { + panic(sliceError) +} + +var divideError = error(errorString("integer divide by zero")) + +func panicdivide() { + panic(divideError) +} + +var overflowError = error(errorString("integer overflow")) + +func panicoverflow() { + panic(overflowError) +} + +var floatError = error(errorString("floating point error")) + +func panicfloat() { + panic(floatError) +} + +var memoryError = error(errorString("invalid memory address or nil pointer dereference")) + +func panicmem() { + panic(memoryError) +} + +func throwreturn() { + gothrow("no return at end of a typed function - compiler is broken") +} + +func throwinit() { + gothrow("recursive call during initialization - linker skew") +} + +// Create a new deferred function fn with siz bytes of arguments. +// The compiler turns a defer statement into a call to this. +//go:nosplit +func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn + // the arguments of fn are in a perilous state. The stack map + // for deferproc does not describe them. So we can't let garbage + // collection or stack copying trigger until we've copied them out + // to somewhere safe. deferproc_m does that. Until deferproc_m, + // we can only call nosplit routines. + argp := uintptr(unsafe.Pointer(&fn)) + argp += unsafe.Sizeof(fn) + if GOARCH == "arm" { + argp += ptrSize // skip caller's saved link register + } + mp := acquirem() + mp.scalararg[0] = uintptr(siz) + mp.ptrarg[0] = unsafe.Pointer(fn) + mp.scalararg[1] = argp + mp.scalararg[2] = getcallerpc(unsafe.Pointer(&siz)) + + if mp.curg != getg() { + // go code on the m stack can't defer + gothrow("defer on m") + } + + onM(deferproc_m) + + releasem(mp) + + // deferproc returns 0 normally. + // a deferred func that stops a panic + // makes the deferproc return 1. + // the code the compiler generates always + // checks the return value and jumps to the + // end of the function if deferproc returns != 0. + return0() + // No code can go here - the C return register has + // been set and must not be clobbered. +} + +// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ... +// Each P holds a pool for defers with small arg sizes. +// Assign defer allocations to pools by rounding to 16, to match malloc size classes. + +const ( + deferHeaderSize = unsafe.Sizeof(_defer{}) + minDeferAlloc = (deferHeaderSize + 15) &^ 15 + minDeferArgs = minDeferAlloc - deferHeaderSize +) + +// defer size class for arg size sz +//go:nosplit +func deferclass(siz uintptr) uintptr { + if siz <= minDeferArgs { + return 0 + } + return (siz - minDeferArgs + 15) / 16 +} + +// total size of memory block for defer with arg size sz +func totaldefersize(siz uintptr) uintptr { + if siz <= minDeferArgs { + return minDeferAlloc + } + return deferHeaderSize + siz +} + +// Ensure that defer arg sizes that map to the same defer size class +// also map to the same malloc size class. 
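The comment above introduces testdefersizes, which follows and checks this property exhaustively against goroundupsize. To make the rounding itself concrete, here is the same arithmetic extracted into a standalone program (deferHeaderSize is an invented stand-in value, since unsafe.Sizeof(_defer{}) is runtime-internal):

package main

import "fmt"

const (
	deferHeaderSize = 48 // illustrative stand-in for unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// Same formula as deferclass above: class 0 fits in the minimum
// allocation; beyond that, one class per 16 bytes of arguments.
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

func main() {
	for _, siz := range []uintptr{0, 8, 16, 17, 32, 48} {
		fmt.Printf("arg size %2d -> defer size class %d\n", siz, deferclass(siz))
	}
}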
+func testdefersizes() { + var m [len(p{}.deferpool)]int32 + + for i := range m { + m[i] = -1 + } + for i := uintptr(0); ; i++ { + defersc := deferclass(i) + if defersc >= uintptr(len(m)) { + break + } + siz := goroundupsize(totaldefersize(i)) + if m[defersc] < 0 { + m[defersc] = int32(siz) + continue + } + if m[defersc] != int32(siz) { + print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n") + gothrow("bad defer size class") + } + } +} + +// The arguments associated with a deferred call are stored +// immediately after the _defer header in memory. +//go:nosplit +func deferArgs(d *_defer) unsafe.Pointer { + return add(unsafe.Pointer(d), unsafe.Sizeof(*d)) +} + +var deferType *_type // type of _defer struct + +func init() { + var x interface{} + x = (*_defer)(nil) + deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem +} + +// Allocate a Defer, usually using per-P pool. +// Each defer must be released with freedefer. +// Note: runs on M stack +func newdefer(siz int32) *_defer { + var d *_defer + sc := deferclass(uintptr(siz)) + mp := acquirem() + if sc < uintptr(len(p{}.deferpool)) { + pp := mp.p + d = pp.deferpool[sc] + if d != nil { + pp.deferpool[sc] = d.link + } + } + if d == nil { + // Allocate new defer+args. + total := goroundupsize(totaldefersize(uintptr(siz))) + d = (*_defer)(mallocgc(total, deferType, 0)) + } + d.siz = siz + gp := mp.curg + d.link = gp._defer + gp._defer = d + releasem(mp) + return d +} + +// Free the given defer. +// The defer cannot be used after this call. +//go:nosplit +func freedefer(d *_defer) { + if d._panic != nil { + freedeferpanic() + } + if d.fn != nil { + freedeferfn() + } + sc := deferclass(uintptr(d.siz)) + if sc < uintptr(len(p{}.deferpool)) { + mp := acquirem() + pp := mp.p + *d = _defer{} + d.link = pp.deferpool[sc] + pp.deferpool[sc] = d + releasem(mp) + } +} + +// Separate function so that it can split stack. +// Windows otherwise runs out of stack space. +func freedeferpanic() { + // _panic must be cleared before d is unlinked from gp. + gothrow("freedefer with d._panic != nil") +} + +func freedeferfn() { + // fn must be cleared before d is unlinked from gp. + gothrow("freedefer with d.fn != nil") +} + +// Run a deferred function if there is one. +// The compiler inserts a call to this at the end of any +// function which calls defer. +// If there is a deferred function, this will call runtime·jmpdefer, +// which will jump to the deferred function such that it appears +// to have been called by the caller of deferreturn at the point +// just before deferreturn was called. The effect is that deferreturn +// is called again and again until there are no more deferred functions. +// Cannot split the stack because we reuse the caller's frame to +// call the deferred function. + +// The single argument isn't actually used - it just has its address +// taken so it can be matched against pending defers. +//go:nosplit +func deferreturn(arg0 uintptr) { + gp := getg() + d := gp._defer + if d == nil { + return + } + argp := uintptr(unsafe.Pointer(&arg0)) + if d.argp != argp { + return + } + + // Moving arguments around. + // Do not allow preemption here, because the garbage collector + // won't know the form of the arguments until the jmpdefer can + // flip the PC over to fn. + mp := acquirem() + memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz)) + fn := d.fn + d.fn = nil + gp._defer = d.link + freedefer(d) + releasem(mp) + jmpdefer(fn, argp) +} + +// Goexit terminates the goroutine that calls it. 
No other goroutine is affected.
+// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
+// is not a panic, however, any recover calls in those deferred functions will return nil.
+//
+// Calling Goexit from the main goroutine terminates that goroutine
+// without func main returning. Since func main has not returned,
+// the program continues execution of other goroutines.
+// If all other goroutines exit, the program crashes.
+func Goexit() {
+	// Run all deferred functions for the current goroutine.
+	// This code is similar to gopanic, see that implementation
+	// for detailed comments.
+	gp := getg()
+	for {
+		d := gp._defer
+		if d == nil {
+			break
+		}
+		if d.started {
+			if d._panic != nil {
+				d._panic.aborted = true
+				d._panic = nil
+			}
+			d.fn = nil
+			gp._defer = d.link
+			freedefer(d)
+			continue
+		}
+		d.started = true
+		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
+		if gp._defer != d {
+			gothrow("bad defer entry in Goexit")
+		}
+		d._panic = nil
+		d.fn = nil
+		gp._defer = d.link
+		freedefer(d)
+		// Note: we ignore recovers here because Goexit isn't a panic
+	}
+	goexit()
+}
+
+func canpanic(*g) bool
+
+// Print all currently active panics. Used when crashing.
+func printpanics(p *_panic) {
+	if p.link != nil {
+		printpanics(p.link)
+		print("\t")
+	}
+	print("panic: ")
+	printany(p.arg)
+	if p.recovered {
+		print(" [recovered]")
+	}
+	print("\n")
+}
+
+// The implementation of the predeclared function panic.
+func gopanic(e interface{}) {
+	gp := getg()
+	if gp.m.curg != gp {
+		gothrow("panic on m stack")
+	}
+
+	// m.softfloat is set during software floating point.
+	// It increments m.locks to avoid preemption.
+	// We moved the memory loads out, so there shouldn't be
+	// any reason for it to panic anymore.
+	if gp.m.softfloat != 0 {
+		gp.m.locks--
+		gp.m.softfloat = 0
+		gothrow("panic during softfloat")
+	}
+	if gp.m.mallocing != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic during malloc")
+	}
+	if gp.m.gcing != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic during gc")
+	}
+	if gp.m.locks != 0 {
+		print("panic: ")
+		printany(e)
+		print("\n")
+		gothrow("panic holding locks")
+	}
+
+	var p _panic
+	p.arg = e
+	p.link = gp._panic
+	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
+
+	for {
+		d := gp._defer
+		if d == nil {
+			break
+		}
+
+		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
+		// take defer off list. The earlier panic or Goexit will not continue running.
+		if d.started {
+			if d._panic != nil {
+				d._panic.aborted = true
+			}
+			d._panic = nil
+			d.fn = nil
+			gp._defer = d.link
+			freedefer(d)
+			continue
+		}
+
+		// Mark defer as started, but keep on list, so that traceback
+		// can find and update the defer's argument frame if stack growth
+		// or a garbage collection happens before reflectcall starts executing d.fn.
+		d.started = true
+
+		// Record the panic that is running the defer.
+		// If there is a new panic during the deferred call, that panic
+		// will find d in the list and will mark d._panic (this panic) aborted.
+		d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))
+
+		p.argp = unsafe.Pointer(getargp(0))
+		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
+		p.argp = nil
+
+		// reflectcall did not panic. Remove d.
+		if gp._defer != d {
+			gothrow("bad defer entry in panic")
+		}
+		d._panic = nil
+		d.fn = nil
+		gp._defer = d.link
+
+		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
+		//GC()
+
+		pc := d.pc
+		argp := unsafe.Pointer(d.argp) // must be pointer so it gets adjusted during stack copy
+		freedefer(d)
+		if p.recovered {
+			gp._panic = p.link
+			// Aborted panics are marked but remain on the g.panic list.
+			// Remove them from the list.
+			for gp._panic != nil && gp._panic.aborted {
+				gp._panic = gp._panic.link
+			}
+			if gp._panic == nil { // must be done with signal
+				gp.sig = 0
+			}
+			// Pass information about recovering frame to recovery.
+			gp.sigcode0 = uintptr(argp)
+			gp.sigcode1 = pc
+			mcall(recovery_m)
+			gothrow("recovery failed") // mcall should not return
+		}
+	}
+
+	// ran out of deferred calls - old-school panic now
+	startpanic()
+	printpanics(gp._panic)
+	dopanic(0)       // should not return
+	*(*int)(nil) = 0 // not reached
+}
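gorecover, defined after getargp below, is the other half of the argp bookkeeping in gopanic: recover succeeds only when its caller's argument pointer matches p.argp, that is, only when called directly from a function deferred by the panicking frame. A small self-contained demonstration of the visible semantics:

package main

import "fmt"

func guarded() (msg string) {
	// Deferred directly by the panicking function, so gorecover's
	// argp comparison succeeds and recover returns the panic value.
	defer func() {
		if r := recover(); r != nil {
			msg = fmt.Sprint("recovered: ", r)
		}
	}()
	// One call deeper, the comparison fails and recover returns nil.
	defer func() { tooDeep() }()
	panic("boom")
}

func tooDeep() {
	if recover() != nil {
		panic("unreachable: recover must not work here")
	}
}

func main() {
	fmt.Println(guarded()) // prints "recovered: boom"
}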
+// getargp returns the location where the caller
+// writes outgoing function call arguments.
+//go:nosplit
+func getargp(x int) uintptr {
+	// x is an argument mainly so that we can return its address.
+	// However, we need to make the function complex enough
+	// that it won't be inlined. We always pass x = 0, so this code
+	// does nothing other than keep the compiler from thinking
+	// the function is simple enough to inline.
+	if x > 0 {
+		return getcallersp(unsafe.Pointer(&x)) * 0
+	}
+	return uintptr(noescape(unsafe.Pointer(&x)))
+}
+
+// The implementation of the predeclared function recover.
+// Cannot split the stack because it needs to reliably
+// find the stack segment of its caller.
+//
+// TODO(rsc): Once we commit to CopyStackAlways,
+// this doesn't need to be nosplit.
+//go:nosplit
+func gorecover(argp uintptr) interface{} {
+	// Must be in a function running as part of a deferred call during the panic.
+	// Must be called from the topmost function of the call
+	// (the function used in the defer statement).
+	// p.argp is the argument pointer of that topmost deferred function call.
+	// Compare against argp reported by caller.
+	// If they match, the caller is the one who can recover.
+	gp := getg()
+	p := gp._panic
+	if p != nil && !p.recovered && argp == uintptr(p.argp) {
+		p.recovered = true
+		return p.arg
+	}
+	return nil
+}
+
+//go:nosplit
+func startpanic() {
+	onM_signalok(startpanic_m)
+}
+
+//go:nosplit
+func dopanic(unused int) {
+	gp := getg()
+	mp := acquirem()
+	mp.ptrarg[0] = unsafe.Pointer(gp)
+	mp.scalararg[0] = getcallerpc((unsafe.Pointer)(&unused))
+	mp.scalararg[1] = getcallersp((unsafe.Pointer)(&unused))
+	onM_signalok(dopanic_m) // should never return
+	*(*int)(nil) = 0
+}
+
+//go:nosplit
+func throw(s *byte) {
+	gp := getg()
+	if gp.m.throwing == 0 {
+		gp.m.throwing = 1
+	}
+	startpanic()
+	print("fatal error: ", gostringnocopy(s), "\n")
+	dopanic(0)
+	*(*int)(nil) = 0 // not reached
+}
+
+//go:nosplit
+func gothrow(s string) {
+	gp := getg()
+	if gp.m.throwing == 0 {
+		gp.m.throwing = 1
+	}
+	startpanic()
+	print("fatal error: ", s, "\n")
+	dopanic(0)
+	*(*int)(nil) = 0 // not reached
+}
diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go
new file mode 100644
index 0000000..d4a9485
--- /dev/null
+++ b/libgo/go/runtime/pprof/mprof_test.go
@@ -0,0 +1,99 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pprof_test
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"runtime"
+	.
"runtime/pprof" + "testing" + "unsafe" +) + +var memSink interface{} + +func allocateTransient1M() { + for i := 0; i < 1024; i++ { + memSink = &struct{ x [1024]byte }{} + } +} + +func allocateTransient2M() { + // prevent inlining + if memSink == nil { + panic("bad") + } + memSink = make([]byte, 2<<20) +} + +type Obj32 struct { + link *Obj32 + pad [32 - unsafe.Sizeof(uintptr(0))]byte +} + +var persistentMemSink *Obj32 + +func allocatePersistent1K() { + for i := 0; i < 32; i++ { + // Can't use slice because that will introduce implicit allocations. + obj := &Obj32{link: persistentMemSink} + persistentMemSink = obj + } +} + +var memoryProfilerRun = 0 + +func TestMemoryProfiler(t *testing.T) { + // Disable sampling, otherwise it's difficult to assert anything. + oldRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + defer func() { + runtime.MemProfileRate = oldRate + }() + + // Allocate a meg to ensure that mcache.next_sample is updated to 1. + for i := 0; i < 1024; i++ { + memSink = make([]byte, 1024) + } + + // Do the interesting allocations. + allocateTransient1M() + allocateTransient2M() + allocatePersistent1K() + memSink = nil + + runtime.GC() // materialize stats + var buf bytes.Buffer + if err := Lookup("heap").WriteTo(&buf, 1); err != nil { + t.Fatalf("failed to write heap profile: %v", err) + } + + memoryProfilerRun++ + + tests := []string{ + fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f x]+ +# 0x[0-9,a-f]+ pprof_test\.allocatePersistent1K\+0x[0-9,a-f]+ .*/mprof_test\.go:43 +# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:66 +`, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun), + + fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f x]+ +# 0x[0-9,a-f]+ pprof_test\.allocateTransient1M\+0x[0-9,a-f]+ .*/mprof_test.go:21 +# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:64 +`, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun), + + fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f x]+ +# 0x[0-9,a-f]+ pprof_test\.allocateTransient2M\+0x[0-9,a-f]+ .*/mprof_test.go:30 +# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:65 +`, memoryProfilerRun, (2<<20)*memoryProfilerRun), + } + + for _, test := range tests { + if !regexp.MustCompile(test).Match(buf.Bytes()) { + t.Fatalf("The entry did not match:\n%v\n\nProfile:\n%v\n", test, buf.String()) + } + } +} diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go index bd0b25f..9c63ccd 100644 --- a/libgo/go/runtime/pprof/pprof.go +++ b/libgo/go/runtime/pprof/pprof.go @@ -345,7 +345,10 @@ func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) { // Hide runtime.goexit and any runtime functions at the beginning. // This is useful mainly for allocation traces. wasPanic = name == "runtime.panic" - if name == "runtime.goexit" || !show && strings.HasPrefix(name, "runtime.") { + if name == "runtime.goexit" || !show && (strings.HasPrefix(name, "runtime.") || strings.HasPrefix(name, "runtime_")) { + continue + } + if !show && !strings.Contains(name, ".") && strings.HasPrefix(name, "__go_") { continue } show = true @@ -579,12 +582,6 @@ func StartCPUProfile(w io.Writer) error { // each client to specify the frequency, we hard code it. const hz = 100 - // Avoid queueing behind StopCPUProfile. - // Could use TryLock instead if we had it. 
- if cpu.profiling { - return fmt.Errorf("cpu profiling already in use") - } - cpu.Lock() defer cpu.Unlock() if cpu.done == nil { diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go index f714472..1069963 100644 --- a/libgo/go/runtime/pprof/pprof_test.go +++ b/libgo/go/runtime/pprof/pprof_test.go @@ -9,7 +9,6 @@ package pprof_test import ( "bytes" "fmt" - "hash/crc32" "math/big" "os/exec" "regexp" @@ -22,35 +21,65 @@ import ( "unsafe" ) -func TestCPUProfile(t *testing.T) { - buf := make([]byte, 100000) - testCPUProfile(t, []string{"crc32.update"}, func() { - // This loop takes about a quarter second on a 2 GHz laptop. - // We only need to get one 100 Hz clock tick, so we've got - // a 25x safety buffer. - for i := 0; i < 1000; i++ { - crc32.ChecksumIEEE(buf) +func cpuHogger(f func()) { + // We only need to get one 100 Hz clock tick, so we've got + // a 25x safety buffer. + // But do at least 500 iterations (which should take about 100ms), + // otherwise TestCPUProfileMultithreaded can fail if only one + // thread is scheduled during the 250ms period. + t0 := time.Now() + for i := 0; i < 500 || time.Since(t0) < 250*time.Millisecond; i++ { + f() + } +} + +var ( + salt1 = 0 + salt2 = 0 +) + +// The actual CPU hogging function. +// Must not call other functions nor access heap/globals in the loop, +// otherwise under race detector the samples will be in the race runtime. +func cpuHog1() { + foo := salt1 + for i := 0; i < 1e5; i++ { + if foo > 0 { + foo *= foo + } else { + foo *= foo + 1 } + } + salt1 = foo +} + +func cpuHog2() { + foo := salt2 + for i := 0; i < 1e5; i++ { + if foo > 0 { + foo *= foo + } else { + foo *= foo + 2 + } + } + salt2 = foo +} + +func TestCPUProfile(t *testing.T) { + testCPUProfile(t, []string{"pprof_test.cpuHog1"}, func() { + cpuHogger(cpuHog1) }) } func TestCPUProfileMultithreaded(t *testing.T) { - buf := make([]byte, 100000) defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2)) - testCPUProfile(t, []string{"crc32.update"}, func() { + testCPUProfile(t, []string{"pprof_test.cpuHog1", "pprof_test.cpuHog2"}, func() { c := make(chan int) go func() { - for i := 0; i < 2000; i++ { - crc32.Update(0, crc32.IEEETable, buf) - } + cpuHogger(cpuHog1) c <- 1 }() - // This loop takes about a quarter second on a 2 GHz laptop. - // We only need to get one 100 Hz clock tick, so we've got - // a 25x safety buffer. - for i := 0; i < 2000; i++ { - crc32.ChecksumIEEE(buf) - } + cpuHogger(cpuHog2) <-c }) } @@ -110,7 +139,7 @@ func testCPUProfile(t *testing.T, need []string, f func()) { f() StopCPUProfile() - // Check that profile is well formed and contains ChecksumIEEE. + // Check that profile is well formed and contains need. have := make([]uintptr, len(need)) parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) { for _, pc := range stk { @@ -118,6 +147,7 @@ func testCPUProfile(t *testing.T, need []string, f func()) { if f == nil { continue } + t.Log(f.Name(), count) for i, name := range need { if strings.Contains(f.Name(), name) { have[i] += count @@ -220,7 +250,7 @@ func TestGoroutineSwitch(t *testing.T) { // exists to record a PC without a traceback. Those are okay. 
if len(stk) == 2 { f := runtime.FuncForPC(stk[1]) - if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode") { + if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode" || f.Name() == "GC") { return } } @@ -282,39 +312,45 @@ func TestBlockProfile(t *testing.T) { tests := [...]TestCase{ {"chan recv", blockChanRecv, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"chan send", blockChanSend, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"chan close", blockChanClose, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/runtime/chan.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"select recv async", blockSelectRecvAsync, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/runtime/select.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"select send sync", blockSelectSendSync, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.goc:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ 
runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/runtime/select.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, {"mutex", blockMutex, ` [0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ -# 0x[0-9,a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9,a-f]+ .*/src/pkg/sync/mutex\.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.blockMutex\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ -# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9,a-f]+ .*/src/sync/mutex\.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockMutex\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +`}, + {"cond", blockCond, ` +[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ +# 0x[0-9,a-f]+ sync\.\(\*Cond\)\.Wait\+0x[0-9,a-f]+ .*/src/sync/cond\.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.blockCond\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ +# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/runtime/pprof/pprof_test.go:[0-9]+ `}, } @@ -402,3 +438,17 @@ func blockMutex() { }() mu.Lock() } + +func blockCond() { + var mu sync.Mutex + c := sync.NewCond(&mu) + mu.Lock() + go func() { + time.Sleep(blockDelay) + mu.Lock() + c.Signal() + mu.Unlock() + }() + c.Wait() + mu.Unlock() +} diff --git a/libgo/go/runtime/print1.go b/libgo/go/runtime/print1.go new file mode 100644 index 0000000..8f82688 --- /dev/null +++ b/libgo/go/runtime/print1.go @@ -0,0 +1,323 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// The compiler knows that a print of a value of this type +// should use printhex instead of printuint (decimal). +type hex uint64 + +func bytes(s string) (ret []byte) { + rp := (*slice)(unsafe.Pointer(&ret)) + sp := (*_string)(noescape(unsafe.Pointer(&s))) + rp.array = sp.str + rp.len = uint(sp.len) + rp.cap = uint(sp.len) + return +} + +// printf is only called from C code. It has no type information for the args, +// but C stacks are ignored by the garbage collector anyway, so having +// type information would not add anything. +//go:nosplit +func printf(s *byte) { + vprintf(gostringnocopy(s), add(unsafe.Pointer(&s), unsafe.Sizeof(s))) +} + +// sprintf is only called from C code. It has no type information for the args, +// but C stacks are ignored by the garbage collector anyway, so having +// type information would not add anything. +//go:nosplit +func snprintf(dst *byte, n int32, s *byte) { + buf := (*[1 << 30]byte)(unsafe.Pointer(dst))[0:n:n] + + gp := getg() + gp.writebuf = buf[0:0 : n-1] // leave room for NUL, this is called from C + vprintf(gostringnocopy(s), add(unsafe.Pointer(&s), unsafe.Sizeof(s))) + buf[len(gp.writebuf)] = '\x00' + gp.writebuf = nil +} + +//var debuglock mutex + +// write to goroutine-local buffer if diverting output, +// or else standard error. 
+func gwrite(b []byte) { + if len(b) == 0 { + return + } + gp := getg() + if gp == nil || gp.writebuf == nil { + write(2, unsafe.Pointer(&b[0]), int32(len(b))) + return + } + + n := copy(gp.writebuf[len(gp.writebuf):cap(gp.writebuf)], b) + gp.writebuf = gp.writebuf[:len(gp.writebuf)+n] +} + +func prints(s *byte) { + b := (*[1 << 30]byte)(unsafe.Pointer(s)) + for i := 0; ; i++ { + if b[i] == 0 { + gwrite(b[:i]) + return + } + } +} + +func printsp() { + print(" ") +} + +func printnl() { + print("\n") +} + +// Very simple printf. Only for debugging prints. +// Do not add to this without checking with Rob. +func vprintf(str string, arg unsafe.Pointer) { + //lock(&debuglock); + + s := bytes(str) + start := 0 + i := 0 + for ; i < len(s); i++ { + if s[i] != '%' { + continue + } + if i > start { + gwrite(s[start:i]) + } + if i++; i >= len(s) { + break + } + var siz uintptr + switch s[i] { + case 't', 'c': + siz = 1 + case 'd', 'x': // 32-bit + arg = roundup(arg, 4) + siz = 4 + case 'D', 'U', 'X', 'f': // 64-bit + arg = roundup(arg, unsafe.Sizeof(uintreg(0))) + siz = 8 + case 'C': + arg = roundup(arg, unsafe.Sizeof(uintreg(0))) + siz = 16 + case 'p', 's': // pointer-sized + arg = roundup(arg, unsafe.Sizeof(uintptr(0))) + siz = unsafe.Sizeof(uintptr(0)) + case 'S': // pointer-aligned but bigger + arg = roundup(arg, unsafe.Sizeof(uintptr(0))) + siz = unsafe.Sizeof(string("")) + case 'a': // pointer-aligned but bigger + arg = roundup(arg, unsafe.Sizeof(uintptr(0))) + siz = unsafe.Sizeof([]byte{}) + case 'i', 'e': // pointer-aligned but bigger + arg = roundup(arg, unsafe.Sizeof(uintptr(0))) + siz = unsafe.Sizeof(interface{}(nil)) + } + switch s[i] { + case 'a': + printslice(*(*[]byte)(arg)) + case 'c': + printbyte(*(*byte)(arg)) + case 'd': + printint(int64(*(*int32)(arg))) + case 'D': + printint(int64(*(*int64)(arg))) + case 'e': + printeface(*(*interface{})(arg)) + case 'f': + printfloat(*(*float64)(arg)) + case 'C': + printcomplex(*(*complex128)(arg)) + case 'i': + printiface(*(*fInterface)(arg)) + case 'p': + printpointer(*(*unsafe.Pointer)(arg)) + case 's': + prints(*(**byte)(arg)) + case 'S': + printstring(*(*string)(arg)) + case 't': + printbool(*(*bool)(arg)) + case 'U': + printuint(*(*uint64)(arg)) + case 'x': + printhex(uint64(*(*uint32)(arg))) + case 'X': + printhex(*(*uint64)(arg)) + } + arg = add(arg, siz) + start = i + 1 + } + if start < i { + gwrite(s[start:i]) + } + + //unlock(&debuglock); +} + +func printpc(p unsafe.Pointer) { + print("PC=", hex(uintptr(p))) +} + +func printbool(v bool) { + if v { + print("true") + } else { + print("false") + } +} + +func printbyte(c byte) { + gwrite((*[1]byte)(unsafe.Pointer(&c))[:]) +} + +func printfloat(v float64) { + switch { + case v != v: + print("NaN") + return + case v+v == v && v > 0: + print("+Inf") + return + case v+v == v && v < 0: + print("-Inf") + return + } + + const n = 7 // digits printed + var buf [n + 7]byte + buf[0] = '+' + e := 0 // exp + if v == 0 { + if 1/v < 0 { + buf[0] = '-' + } + } else { + if v < 0 { + v = -v + buf[0] = '-' + } + + // normalize + for v >= 10 { + e++ + v /= 10 + } + for v < 1 { + e-- + v *= 10 + } + + // round + h := 5.0 + for i := 0; i < n; i++ { + h /= 10 + } + v += h + if v >= 10 { + e++ + v /= 10 + } + } + + // format +d.dddd+edd + for i := 0; i < n; i++ { + s := int(v) + buf[i+2] = byte(s + '0') + v -= float64(s) + v *= 10 + } + buf[1] = buf[2] + buf[2] = '.' 
+ + buf[n+2] = 'e' + buf[n+3] = '+' + if e < 0 { + e = -e + buf[n+3] = '-' + } + + buf[n+4] = byte(e/100) + '0' + buf[n+5] = byte(e/10)%10 + '0' + buf[n+6] = byte(e%10) + '0' + gwrite(buf[:]) +} + +func printcomplex(c complex128) { + print("(", real(c), imag(c), "i)") +} + +func printuint(v uint64) { + var buf [100]byte + i := len(buf) + for i--; i > 0; i-- { + buf[i] = byte(v%10 + '0') + if v < 10 { + break + } + v /= 10 + } + gwrite(buf[i:]) +} + +func printint(v int64) { + if v < 0 { + print("-") + v = -v + } + printuint(uint64(v)) +} + +func printhex(v uint64) { + const dig = "0123456789abcdef" + var buf [100]byte + i := len(buf) + for i--; i > 0; i-- { + buf[i] = dig[v%16] + if v < 16 { + break + } + v /= 16 + } + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + gwrite(buf[i:]) +} + +func printpointer(p unsafe.Pointer) { + printhex(uint64(uintptr(p))) +} + +func printstring(s string) { + if uintptr(len(s)) > maxstring { + gwrite(bytes("[string too long]")) + return + } + gwrite(bytes(s)) +} + +func printslice(s []byte) { + sp := (*slice)(unsafe.Pointer(&s)) + print("[", len(s), "/", cap(s), "]") + printpointer(unsafe.Pointer(sp.array)) +} + +func printeface(e interface{}) { + ep := (*eface)(unsafe.Pointer(&e)) + print("(", ep._type, ",", ep.data, ")") +} + +func printiface(i fInterface) { + ip := (*iface)(unsafe.Pointer(&i)) + print("(", ip.tab, ",", ip.data, ")") +} diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go new file mode 100644 index 0000000..517ca03 --- /dev/null +++ b/libgo/go/runtime/proc.go @@ -0,0 +1,246 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +func newsysmon() + +func runtime_init() +func main_init() +func main_main() + +// The main goroutine. +func main() { + g := getg() + + // Racectx of m0->g0 is used only as the parent of the main goroutine. + // It must not be used for anything else. + g.m.g0.racectx = 0 + + // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit. + // Using decimal instead of binary GB and MB because + // they look nicer in the stack overflow failure message. + if ptrSize == 8 { + maxstacksize = 1000000000 + } else { + maxstacksize = 250000000 + } + + onM(newsysmon) + + // Lock the main goroutine onto this, the main OS thread, + // during initialization. Most programs won't care, but a few + // do require certain calls to be made by the main thread. + // Those can arrange for main.main to run in the main thread + // by calling runtime.LockOSThread during initialization + // to preserve the lock. + lockOSThread() + + if g.m != &m0 { + gothrow("runtime.main not on m0") + } + + runtime_init() // must be before defer + + // Defer unlock so that runtime.Goexit during init does the unlock too. + needUnlock := true + defer func() { + if needUnlock { + unlockOSThread() + } + }() + + memstats.enablegc = true // now that runtime is initialized, GC is okay + + main_init() + + needUnlock = false + unlockOSThread() + + main_main() + if raceenabled { + racefini() + } + + // Make racy client program work: if panicking on + // another goroutine at the same time as main returns, + // let the other goroutine finish printing the panic trace. + // Once it does, it will exit. See issue 3934. 
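The integer printers above share one right-to-left idiom: fill digits into the tail of a scratch buffer, then emit only the used suffix. A standalone sketch of printuint's loop (printUint is an invented name; the runtime writes through gwrite instead of returning a string):

    package main

    import "fmt"

    // printUint mirrors printuint above: write digits into the tail of a
    // fixed buffer, break once the most significant digit is placed, and
    // keep only the used suffix. 21 bytes covers the 20 digits of 2^64-1.
    func printUint(v uint64) string {
        var buf [21]byte
        i := len(buf)
        for i--; i > 0; i-- {
            buf[i] = byte(v%10 + '0')
            if v < 10 {
                break
            }
            v /= 10
        }
        return string(buf[i:])
    }

    func main() {
        fmt.Println(printUint(0), printUint(12345), printUint(^uint64(0)))
    }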
+ if panicking != 0 { + gopark(nil, nil, "panicwait") + } + + exit(0) + for { + var x *int32 + *x = 0 + } +} + +var parkunlock_c byte + +// start forcegc helper goroutine +func init() { + go forcegchelper() +} + +func forcegchelper() { + forcegc.g = getg() + forcegc.g.issystem = true + for { + lock(&forcegc.lock) + if forcegc.idle != 0 { + gothrow("forcegc: phase error") + } + atomicstore(&forcegc.idle, 1) + goparkunlock(&forcegc.lock, "force gc (idle)") + // this goroutine is explicitly resumed by sysmon + if debug.gctrace > 0 { + println("GC forced") + } + gogc(1) + } +} + +//go:nosplit + +// Gosched yields the processor, allowing other goroutines to run. It does not +// suspend the current goroutine, so execution resumes automatically. +func Gosched() { + mcall(gosched_m) +} + +// Puts the current goroutine into a waiting state and calls unlockf. +// If unlockf returns false, the goroutine is resumed. +func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) { + mp := acquirem() + gp := mp.curg + status := readgstatus(gp) + if status != _Grunning && status != _Gscanrunning { + gothrow("gopark: bad g status") + } + mp.waitlock = lock + mp.waitunlockf = unlockf + gp.waitreason = reason + releasem(mp) + // can't do anything that might move the G between Ms here. + mcall(park_m) +} + +// Puts the current goroutine into a waiting state and unlocks the lock. +// The goroutine can be made runnable again by calling goready(gp). +func goparkunlock(lock *mutex, reason string) { + gopark(unsafe.Pointer(&parkunlock_c), unsafe.Pointer(lock), reason) +} + +func goready(gp *g) { + mp := acquirem() + mp.ptrarg[0] = unsafe.Pointer(gp) + onM(ready_m) + releasem(mp) +} + +//go:nosplit +func acquireSudog() *sudog { + c := gomcache() + s := c.sudogcache + if s != nil { + if s.elem != nil { + gothrow("acquireSudog: found s.elem != nil in cache") + } + c.sudogcache = s.next + s.next = nil + return s + } + + // Delicate dance: the semaphore implementation calls + // acquireSudog, acquireSudog calls new(sudog), + // new calls malloc, malloc can call the garbage collector, + // and the garbage collector calls the semaphore implementation + // in stoptheworld. + // Break the cycle by doing acquirem/releasem around new(sudog). + // The acquirem/releasem increments m.locks during new(sudog), + // which keeps the garbage collector from being invoked. + mp := acquirem() + p := new(sudog) + releasem(mp) + return p +} + +//go:nosplit +func releaseSudog(s *sudog) { + if s.elem != nil { + gothrow("runtime: sudog with non-nil elem") + } + if s.selectdone != nil { + gothrow("runtime: sudog with non-nil selectdone") + } + if s.next != nil { + gothrow("runtime: sudog with non-nil next") + } + if s.prev != nil { + gothrow("runtime: sudog with non-nil prev") + } + if s.waitlink != nil { + gothrow("runtime: sudog with non-nil waitlink") + } + gp := getg() + if gp.param != nil { + gothrow("runtime: releaseSudog with non-nil gp.param") + } + c := gomcache() + s.next = c.sudogcache + c.sudogcache = s +} + +// funcPC returns the entry PC of the function f. +// It assumes that f is a func value. Otherwise the behavior is undefined. 
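As an aside, acquireSudog/releaseSudog above implement an intrusive free list whose invariant is that cached objects carry no stale references; the throws enforce it at both ends so a recycled object can never keep dead memory reachable. A generic sketch of the same discipline (node and cache are invented names; panic stands in for gothrow):

    package main

    import "fmt"

    // node plays the role of sudog: a pooled object threaded on an
    // intrusive next pointer. Every reference field must be nil before
    // the object re-enters the cache.
    type node struct {
        next *node
        elem interface{}
    }

    type cache struct{ free *node }

    func (c *cache) acquire() *node {
        if s := c.free; s != nil {
            if s.elem != nil {
                panic("acquire: found s.elem != nil in cache")
            }
            c.free = s.next
            s.next = nil
            return s
        }
        return new(node)
    }

    func (c *cache) release(s *node) {
        if s.elem != nil || s.next != nil {
            panic("release: node still holds references")
        }
        s.next = c.free
        c.free = s
    }

    func main() {
        var c cache
        n := c.acquire()
        n.elem = "payload"
        n.elem = nil // caller must clear before release, as select does
        c.release(n)
        fmt.Println(c.acquire() == n) // true: the cache reuses the node
    }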
+//go:nosplit +func funcPC(f interface{}) uintptr { + return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize)) +} + +// called from assembly +func badmcall(fn func(*g)) { + gothrow("runtime: mcall called on m->g0 stack") +} + +func badmcall2(fn func(*g)) { + gothrow("runtime: mcall function returned") +} + +func badreflectcall() { + panic("runtime: arg size to reflect.call more than 1GB") +} + +func lockedOSThread() bool { + gp := getg() + return gp.lockedm != nil && gp.m.lockedg != nil +} + +func newP() *p { + return new(p) +} + +func newM() *m { + return new(m) +} + +func newG() *g { + return new(g) +} + +func allgadd(gp *g) { + if readgstatus(gp) == _Gidle { + gothrow("allgadd: bad status Gidle") + } + + lock(&allglock) + allgs = append(allgs, gp) + allg = &allgs[0] + allglen = uintptr(len(allgs)) + unlock(&allglock) +} diff --git a/libgo/go/runtime/race0.go b/libgo/go/runtime/race0.go new file mode 100644 index 0000000..5d90cc8 --- /dev/null +++ b/libgo/go/runtime/race0.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !race + +// Dummy race detection API, used when not built with -race. + +package runtime + +import ( + "unsafe" +) + +const raceenabled = false + +// Because raceenabled is false, none of these functions should be called. + +func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") } +func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") } +func raceinit() { gothrow("race") } +func racefini() { gothrow("race") } +func racemapshadow(addr unsafe.Pointer, size uintptr) { gothrow("race") } +func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") } +func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") } +func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") } +func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") } +func raceacquire(addr unsafe.Pointer) { gothrow("race") } +func raceacquireg(gp *g, addr unsafe.Pointer) { gothrow("race") } +func racerelease(addr unsafe.Pointer) { gothrow("race") } +func racereleaseg(gp *g, addr unsafe.Pointer) { gothrow("race") } +func racereleasemerge(addr unsafe.Pointer) { gothrow("race") } +func racereleasemergeg(gp *g, addr unsafe.Pointer) { gothrow("race") } +func racefingo() { gothrow("race") } +func racemalloc(p unsafe.Pointer, sz uintptr) { gothrow("race") } +func racegostart(pc uintptr) uintptr { gothrow("race"); return 0 } +func racegoend() { gothrow("race") } diff --git a/libgo/go/runtime/rdebug.go b/libgo/go/runtime/rdebug.go new file mode 100644 index 0000000..e5e6911 --- /dev/null +++ b/libgo/go/runtime/rdebug.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package runtime + +func setMaxStack(in int) (out int) { + out = int(maxstacksize) + maxstacksize = uintptr(in) + return out +} + +func setGCPercent(in int32) (out int32) { + mp := acquirem() + mp.scalararg[0] = uintptr(int(in)) + onM(setgcpercent_m) + out = int32(int(mp.scalararg[0])) + releasem(mp) + return out +} + +func setPanicOnFault(new bool) (old bool) { + mp := acquirem() + old = mp.curg.paniconfault + mp.curg.paniconfault = new + releasem(mp) + return old +} + +func setMaxThreads(in int) (out int) { + mp := acquirem() + mp.scalararg[0] = uintptr(in) + onM(setmaxthreads_m) + out = int(mp.scalararg[0]) + releasem(mp) + return out +} diff --git a/libgo/go/runtime/rune.go b/libgo/go/runtime/rune.go new file mode 100644 index 0000000..a9f6835 --- /dev/null +++ b/libgo/go/runtime/rune.go @@ -0,0 +1,219 @@ +/* + * The authors of this software are Rob Pike and Ken Thompson. + * Copyright (c) 2002 by Lucent Technologies. + * Portions Copyright 2009 The Go Authors. All rights reserved. + * Permission to use, copy, modify, and distribute this software for any + * purpose without fee is hereby granted, provided that this entire notice + * is included in all copies of any software which is or includes a copy + * or modification of this software and in all copies of the supporting + * documentation for such software. + * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED + * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY + * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY + * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. + */ + +/* + * This code is copied, with slight editing due to type differences, + * from a subset of ../lib9/utf/rune.c + */ + +package runtime + +const ( + bit1 = 7 + bitx = 6 + bit2 = 5 + bit3 = 4 + bit4 = 3 + bit5 = 2 + + t1 = ((1 << (bit1 + 1)) - 1) ^ 0xFF /* 0000 0000 */ + tx = ((1 << (bitx + 1)) - 1) ^ 0xFF /* 1000 0000 */ + t2 = ((1 << (bit2 + 1)) - 1) ^ 0xFF /* 1100 0000 */ + t3 = ((1 << (bit3 + 1)) - 1) ^ 0xFF /* 1110 0000 */ + t4 = ((1 << (bit4 + 1)) - 1) ^ 0xFF /* 1111 0000 */ + t5 = ((1 << (bit5 + 1)) - 1) ^ 0xFF /* 1111 1000 */ + + rune1 = (1 << (bit1 + 0*bitx)) - 1 /* 0000 0000 0111 1111 */ + rune2 = (1 << (bit2 + 1*bitx)) - 1 /* 0000 0111 1111 1111 */ + rune3 = (1 << (bit3 + 2*bitx)) - 1 /* 1111 1111 1111 1111 */ + rune4 = (1 << (bit4 + 3*bitx)) - 1 /* 0001 1111 1111 1111 1111 1111 */ + + maskx = (1 << bitx) - 1 /* 0011 1111 */ + testx = maskx ^ 0xFF /* 1100 0000 */ + + runeerror = 0xFFFD + runeself = 0x80 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + bad = runeerror + + runemax = 0x10FFFF /* maximum rune value */ +) + +/* + * Modified by Wei-Hwa Huang, Google Inc., on 2004-09-24 + * This is a slower but "safe" version of the old chartorune + * that works on strings that are not necessarily null-terminated. + * + * If you know for sure that your string is null-terminated, + * chartorune will be a bit faster. + * + * It is guaranteed not to attempt to access "length" + * past the incoming pointer. This is to avoid + * possible access violations. If the string appears to be + * well-formed but incomplete (i.e., to get the whole Rune + * we'd need to read past str+length) then we'll set the Rune + * to Bad and return 0. + * + * Note that if we have decoding problems for other + * reasons, we return 1 instead of 0. 
+ */ +func charntorune(s string) (rune, int) { + /* When we're not allowed to read anything */ + if len(s) <= 0 { + return bad, 1 + } + + /* + * one character sequence (7-bit value) + * 00000-0007F => T1 + */ + c := s[0] + if c < tx { + return rune(c), 1 + } + + // If we can't read more than one character we must stop + if len(s) <= 1 { + return bad, 1 + } + + /* + * two character sequence (11-bit value) + * 0080-07FF => t2 tx + */ + c1 := s[1] ^ tx + if (c1 & testx) != 0 { + return bad, 1 + } + if c < t3 { + if c < t2 { + return bad, 1 + } + l := ((rune(c) << bitx) | rune(c1)) & rune2 + if l <= rune1 { + return bad, 1 + } + return l, 2 + } + + // If we can't read more than two characters we must stop + if len(s) <= 2 { + return bad, 1 + } + + /* + * three character sequence (16-bit value) + * 0800-FFFF => t3 tx tx + */ + c2 := s[2] ^ tx + if (c2 & testx) != 0 { + return bad, 1 + } + if c < t4 { + l := ((((rune(c) << bitx) | rune(c1)) << bitx) | rune(c2)) & rune3 + if l <= rune2 { + return bad, 1 + } + if surrogateMin <= l && l <= surrogateMax { + return bad, 1 + } + return l, 3 + } + + if len(s) <= 3 { + return bad, 1 + } + + /* + * four character sequence (21-bit value) + * 10000-1FFFFF => t4 tx tx tx + */ + c3 := s[3] ^ tx + if (c3 & testx) != 0 { + return bad, 1 + } + if c < t5 { + l := ((((((rune(c) << bitx) | rune(c1)) << bitx) | rune(c2)) << bitx) | rune(c3)) & rune4 + if l <= rune3 || l > runemax { + return bad, 1 + } + return l, 4 + } + + // Support for 5-byte or longer UTF-8 would go here, but + // since we don't have that, we'll just return bad. + return bad, 1 +} + +// runetochar converts r to bytes and writes the result to str. +// returns the number of bytes generated. +func runetochar(str []byte, r rune) int { + /* runes are signed, so convert to unsigned for range check. */ + c := uint32(r) + /* + * one character sequence + * 00000-0007F => 00-7F + */ + if c <= rune1 { + str[0] = byte(c) + return 1 + } + /* + * two character sequence + * 0080-07FF => t2 tx + */ + if c <= rune2 { + str[0] = byte(t2 | (c >> (1 * bitx))) + str[1] = byte(tx | (c & maskx)) + return 2 + } + + /* + * If the rune is out of range or a surrogate half, convert it to the error rune. + * Do this test here because the error rune encodes to three bytes. + * Doing it earlier would duplicate work, since an out of range + * rune wouldn't have fit in one or two bytes. + */ + if c > runemax { + c = runeerror + } + if surrogateMin <= c && c <= surrogateMax { + c = runeerror + } + + /* + * three character sequence + * 0800-FFFF => t3 tx tx + */ + if c <= rune3 { + str[0] = byte(t3 | (c >> (2 * bitx))) + str[1] = byte(tx | ((c >> (1 * bitx)) & maskx)) + str[2] = byte(tx | (c & maskx)) + return 3 + } + + /* + * four character sequence (21-bit value) + * 10000-1FFFFF => t4 tx tx tx + */ + str[0] = byte(t4 | (c >> (3 * bitx))) + str[1] = byte(tx | ((c >> (2 * bitx)) & maskx)) + str[2] = byte(tx | ((c >> (1 * bitx)) & maskx)) + str[3] = byte(tx | (c & maskx)) + return 4 +} diff --git a/libgo/go/runtime/runtime.go b/libgo/go/runtime/runtime.go new file mode 100644 index 0000000..4e4e1d1 --- /dev/null +++ b/libgo/go/runtime/runtime.go @@ -0,0 +1,60 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
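As a cross-check, charntorune's contract — never read past len(s), and report a malformed or truncated prefix as (bad, 1) — matches unicode/utf8.DecodeRuneInString in the standard library, which returns (RuneError, 1) in the same cases:

    package main

    import (
        "fmt"
        "unicode/utf8"
    )

    func main() {
        // "\xc3" and "\xf0\x9f" are truncated 2- and 4-byte sequences:
        // both decode as (RuneError, 1), the stdlib's (bad, 1).
        for _, s := range []string{"a", "é", "\xc3", "\xf0\x9f"} {
            r, size := utf8.DecodeRuneInString(s)
            fmt.Printf("%q -> %U size=%d\n", s, r, size)
        }
    }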
+ +package runtime + +var ticks struct { + lock mutex + val uint64 +} + +var tls0 [8]uintptr // available storage for m0's TLS; not necessarily used; opaque to GC + +// Note: Called by runtime/pprof in addition to runtime code. +func tickspersecond() int64 { + r := int64(atomicload64(&ticks.val)) + if r != 0 { + return r + } + lock(&ticks.lock) + r = int64(ticks.val) + if r == 0 { + t0 := nanotime() + c0 := cputicks() + usleep(100 * 1000) + t1 := nanotime() + c1 := cputicks() + if t1 == t0 { + t1++ + } + r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0) + if r == 0 { + r++ + } + atomicstore64(&ticks.val, uint64(r)) + } + unlock(&ticks.lock) + return r +} + +func makeStringSlice(n int) []string { + return make([]string, n) +} + +// TODO: Move to parfor.go when parfor.c becomes parfor.go. +func parforalloc(nthrmax uint32) *parfor { + return &parfor{ + thr: &make([]parforthread, nthrmax)[0], + nthrmax: nthrmax, + } +} + +var envs []string +var argslice []string + +// called from syscall +func runtime_envs() []string { return envs } + +// called from os +func runtime_args() []string { return argslice } diff --git a/libgo/go/runtime/runtime_test.go b/libgo/go/runtime/runtime_test.go index 5c50467..8059d1a 100644 --- a/libgo/go/runtime/runtime_test.go +++ b/libgo/go/runtime/runtime_test.go @@ -97,8 +97,9 @@ func BenchmarkDeferMany(b *testing.B) { // The value reported will include the padding between runtime.gogo and the // next function in memory. That's fine. func TestRuntimeGogoBytes(t *testing.T) { - if GOOS == "nacl" { - t.Skip("skipping on nacl") + switch GOOS { + case "android", "nacl": + t.Skipf("skipping on %s", GOOS) } dir, err := ioutil.TempDir("", "go-build") @@ -107,7 +108,7 @@ func TestRuntimeGogoBytes(t *testing.T) { } defer os.RemoveAll(dir) - out, err := exec.Command("go", "build", "-o", dir+"/hello", "../../../test/helloworld.go").CombinedOutput() + out, err := exec.Command("go", "build", "-o", dir+"/hello", "../../test/helloworld.go").CombinedOutput() if err != nil { t.Fatalf("building hello world: %v\n%s", err, out) } @@ -159,8 +160,8 @@ var faultAddrs = []uint64{ // or else malformed. 0xffffffffffffffff, 0xfffffffffffff001, - // no 0xffffffffffff0001; 0xffff0001 is mapped for 32-bit user space on OS X - // no 0xfffffffffff00001; 0xfff00001 is mapped for 32-bit user space sometimes on Linux + 0xffffffffffff0001, + 0xfffffffffff00001, 0xffffffffff000001, 0xfffffffff0000001, 0xffffffff00000001, @@ -184,29 +185,68 @@ func TestSetPanicOnFault(t *testing.T) { old := debug.SetPanicOnFault(true) defer debug.SetPanicOnFault(old) + nfault := 0 for _, addr := range faultAddrs { - if Compiler == "gccgo" && GOARCH == "386" && (addr&0xff000000) != 0 { - // On gccgo these addresses can be used for - // the thread stack. 
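Aside (not part of the patch): the reworked test below depends on runtime/debug.SetPanicOnFault turning a wild read into a recoverable panic rather than a crash. A minimal sketch of that behavior — the address is one of the test's "malformed or unmapped everywhere" values, though, as the test notes, some such addresses can turn out to be legitimately readable:

    package main

    import (
        "fmt"
        "runtime/debug"
        "unsafe"
    )

    func main() {
        // Enable fault panics; restore the previous setting on exit.
        defer debug.SetPanicOnFault(debug.SetPanicOnFault(true))
        defer func() {
            if err := recover(); err != nil {
                fmt.Println("recovered:", err)
            }
        }()
        addr := ^uintptr(0xffe) // 0x...fffff001
        v := *(*byte)(unsafe.Pointer(addr))
        fmt.Println("readable after all:", v) // reached only if the page is mapped
    }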
- continue - } - testSetPanicOnFault(t, uintptr(addr)) + testSetPanicOnFault(t, uintptr(addr), &nfault) + } + if nfault == 0 { + t.Fatalf("none of the addresses faulted") } } -func testSetPanicOnFault(t *testing.T, addr uintptr) { +func testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) { if GOOS == "nacl" { t.Skip("nacl doesn't seem to fault on high addresses") } defer func() { - if err := recover(); err == nil { - t.Fatalf("did not find error in recover") + if err := recover(); err != nil { + *nfault++ } }() - var p *int - p = (*int)(unsafe.Pointer(addr)) - println(*p) - t.Fatalf("still here - should have faulted on address %#x", addr) + // The read should fault, except that sometimes we hit + // addresses that have had C or kernel pages mapped there + // readable by user code. So just log the content. + // If no addresses fault, we'll fail the test. + v := *(*byte)(unsafe.Pointer(addr)) + t.Logf("addr %#x: %#x\n", addr, v) +} + +func eqstring_generic(s1, s2 string) bool { + if len(s1) != len(s2) { + return false + } + // optimization in assembly versions: + // if s1.str == s2.str { return true } + for i := 0; i < len(s1); i++ { + if s1[i] != s2[i] { + return false + } + } + return true +} + +func TestEqString(t *testing.T) { + // This isn't really an exhaustive test of eqstring, it's + // just a convenient way of documenting (via eqstring_generic) + // what eqstring does. + s := []string{ + "", + "a", + "c", + "aaa", + "ccc", + "cccc"[:3], // same contents, different string + "1234567890", + } + for _, s1 := range s { + for _, s2 := range s { + x := s1 == s2 + y := eqstring_generic(s1, s2) + if x != y { + t.Errorf(`eqstring("%s","%s") = %t, want %t`, s1, s2, x, y) + } + } + } } diff --git a/libgo/go/runtime/select.go b/libgo/go/runtime/select.go new file mode 100644 index 0000000..f735a71 --- /dev/null +++ b/libgo/go/runtime/select.go @@ -0,0 +1,651 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +// This file contains the implementation of Go select statements. 
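The core property this file implements is visible from user code: when several cases are ready, select chooses among them pseudo-randomly — the effect of the permuted pollorder built in selectgo below. A quick demonstration:

    package main

    import "fmt"

    func main() {
        a, b := make(chan int, 1), make(chan int, 1)
        counts := map[string]int{}
        for i := 0; i < 10000; i++ {
            a <- 1
            b <- 1
            select { // both cases ready: choice is randomized
            case <-a:
                counts["a"]++
                <-b // drain the other side
            case <-b:
                counts["b"]++
                <-a
            }
        }
        fmt.Println(counts["a"], counts["b"]) // each near 5000
    }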
+ +import "unsafe" + +const ( + debugSelect = false +) + +var ( + chansendpc = funcPC(chansend) + chanrecvpc = funcPC(chanrecv) +) + +func selectsize(size uintptr) uintptr { + selsize := unsafe.Sizeof(_select{}) + + (size-1)*unsafe.Sizeof(_select{}.scase[0]) + + size*unsafe.Sizeof(*_select{}.lockorder) + + size*unsafe.Sizeof(*_select{}.pollorder) + return round(selsize, _Int64Align) +} + +func newselect(sel *_select, selsize int64, size int32) { + if selsize != int64(selectsize(uintptr(size))) { + print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n") + gothrow("bad select size") + } + sel.tcase = uint16(size) + sel.ncase = 0 + sel.lockorder = (**hchan)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(_select{}.scase[0]))) + sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*_select{}.lockorder))) + + if debugSelect { + print("newselect s=", sel, " size=", size, "\n") + } +} + +//go:nosplit +func selectsend(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) { + // nil cases do not compete + if c != nil { + selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel))) + } + return +} + +// cut in half to give stack a chance to split +func selectsendImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) { + i := sel.ncase + if i >= sel.tcase { + gothrow("selectsend: too many cases") + } + sel.ncase = i + 1 + cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0]))) + + cas.pc = pc + cas._chan = c + cas.so = uint16(so) + cas.kind = _CaseSend + cas.elem = elem + + if debugSelect { + print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n") + } +} + +//go:nosplit +func selectrecv(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) { + // nil cases do not compete + if c != nil { + selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel))) + } + return +} + +//go:nosplit +func selectrecv2(sel *_select, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) { + // nil cases do not compete + if c != nil { + selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel))) + } + return +} + +func selectrecvImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) { + i := sel.ncase + if i >= sel.tcase { + gothrow("selectrecv: too many cases") + } + sel.ncase = i + 1 + cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0]))) + cas.pc = pc + cas._chan = c + cas.so = uint16(so) + cas.kind = _CaseRecv + cas.elem = elem + cas.receivedp = received + + if debugSelect { + print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n") + } +} + +//go:nosplit +func selectdefault(sel *_select) (selected bool) { + selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel))) + return +} + +func selectdefaultImpl(sel *_select, callerpc uintptr, so uintptr) { + i := sel.ncase + if i >= sel.tcase { + gothrow("selectdefault: too many cases") + } + sel.ncase = i + 1 + cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0]))) + cas.pc = callerpc + cas._chan = nil + cas.so = uint16(so) + cas.kind = _CaseDefault + + if debugSelect { 
+ print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n") + } +} + +func sellock(sel *_select) { + lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)} + lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice)) + var c *hchan + for _, c0 := range lockorder { + if c0 != nil && c0 != c { + c = c0 + lock(&c.lock) + } + } +} + +func selunlock(sel *_select) { + // We must be very careful here to not touch sel after we have unlocked + // the last lock, because sel can be freed right after the last unlock. + // Consider the following situation. + // First M calls runtime·park() in runtime·selectgo() passing the sel. + // Once runtime·park() has unlocked the last lock, another M makes + // the G that calls select runnable again and schedules it for execution. + // When the G runs on another M, it locks all the locks and frees sel. + // Now if the first M touches sel, it will access freed memory. + n := int(sel.ncase) + r := 0 + lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), n, n} + lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice)) + // skip the default case + if n > 0 && lockorder[0] == nil { + r = 1 + } + for i := n - 1; i >= r; i-- { + c := lockorder[i] + if i > 0 && c == lockorder[i-1] { + continue // will unlock it on the next iteration + } + unlock(&c.lock) + } +} + +func selparkcommit(gp *g, sel *_select) bool { + selunlock(sel) + return true +} + +func block() { + gopark(nil, nil, "select (no cases)") // forever +} + +// overwrites return pc on stack to signal which case of the select +// to run, so cannot appear at the top of a split stack. +//go:nosplit +func selectgo(sel *_select) { + pc, offset := selectgoImpl(sel) + *(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true + setcallerpc(unsafe.Pointer(&sel), pc) +} + +// selectgoImpl returns scase.pc and scase.so for the select +// case which fired. +func selectgoImpl(sel *_select) (uintptr, uint16) { + if debugSelect { + print("select: sel=", sel, "\n") + } + + scaseslice := sliceStruct{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)} + scases := *(*[]scase)(unsafe.Pointer(&scaseslice)) + + var t0 int64 + if blockprofilerate > 0 { + t0 = cputicks() + for i := 0; i < int(sel.ncase); i++ { + scases[i].releasetime = -1 + } + } + + // The compiler rewrites selects that statically have + // only 0 or 1 cases plus default into simpler constructs. + // The only way we can end up with such small sel.ncase + // values here is for a larger select in which most channels + // have been nilled out. The general code handles those + // cases correctly, and they are rare enough not to bother + // optimizing (and needing to test). + + // generate permuted order + pollslice := sliceStruct{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)} + pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice)) + for i := 0; i < int(sel.ncase); i++ { + pollorder[i] = uint16(i) + } + for i := 1; i < int(sel.ncase); i++ { + o := pollorder[i] + j := int(fastrand1()) % (i + 1) + pollorder[i] = pollorder[j] + pollorder[j] = o + } + + // sort the cases by Hchan address to get the locking order. + // simple heap sort, to guarantee n log n time and constant stack footprint. 
+ lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)} + lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice)) + for i := 0; i < int(sel.ncase); i++ { + j := i + c := scases[j]._chan + for j > 0 && lockorder[(j-1)/2].sortkey() < c.sortkey() { + k := (j - 1) / 2 + lockorder[j] = lockorder[k] + j = k + } + lockorder[j] = c + } + for i := int(sel.ncase) - 1; i >= 0; i-- { + c := lockorder[i] + lockorder[i] = lockorder[0] + j := 0 + for { + k := j*2 + 1 + if k >= i { + break + } + if k+1 < i && lockorder[k].sortkey() < lockorder[k+1].sortkey() { + k++ + } + if c.sortkey() < lockorder[k].sortkey() { + lockorder[j] = lockorder[k] + j = k + continue + } + break + } + lockorder[j] = c + } + /* + for i := 0; i+1 < int(sel.ncase); i++ { + if lockorder[i].sortkey() > lockorder[i+1].sortkey() { + print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n") + gothrow("select: broken sort") + } + } + */ + + // lock all the channels involved in the select + sellock(sel) + + var ( + gp *g + done uint32 + sg *sudog + c *hchan + k *scase + sglist *sudog + sgnext *sudog + ) + +loop: + // pass 1 - look for something already waiting + var dfl *scase + var cas *scase + for i := 0; i < int(sel.ncase); i++ { + cas = &scases[pollorder[i]] + c = cas._chan + + switch cas.kind { + case _CaseRecv: + if c.dataqsiz > 0 { + if c.qcount > 0 { + goto asyncrecv + } + } else { + sg = c.sendq.dequeue() + if sg != nil { + goto syncrecv + } + } + if c.closed != 0 { + goto rclose + } + + case _CaseSend: + if raceenabled { + racereadpc(unsafe.Pointer(c), cas.pc, chansendpc) + } + if c.closed != 0 { + goto sclose + } + if c.dataqsiz > 0 { + if c.qcount < c.dataqsiz { + goto asyncsend + } + } else { + sg = c.recvq.dequeue() + if sg != nil { + goto syncsend + } + } + + case _CaseDefault: + dfl = cas + } + } + + if dfl != nil { + selunlock(sel) + cas = dfl + goto retc + } + + // pass 2 - enqueue on all chans + gp = getg() + done = 0 + for i := 0; i < int(sel.ncase); i++ { + cas = &scases[pollorder[i]] + c = cas._chan + sg := acquireSudog() + sg.g = gp + // Note: selectdone is adjusted for stack copies in stack.c:adjustsudogs + sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done))) + sg.elem = cas.elem + sg.releasetime = 0 + if t0 != 0 { + sg.releasetime = -1 + } + sg.waitlink = gp.waiting + gp.waiting = sg + + switch cas.kind { + case _CaseRecv: + c.recvq.enqueue(sg) + + case _CaseSend: + c.sendq.enqueue(sg) + } + } + + // wait for someone to wake us up + gp.param = nil + gopark(unsafe.Pointer(funcPC(selparkcommit)), unsafe.Pointer(sel), "select") + + // someone woke us up + sellock(sel) + sg = (*sudog)(gp.param) + gp.param = nil + + // pass 3 - dequeue from unsuccessful chans + // otherwise they stack up on quiet channels + // record the successful case, if any. + // We singly-linked up the SudoGs in case order, so when + // iterating through the linked list they are in reverse order. + cas = nil + sglist = gp.waiting + // Clear all selectdone and elem before unlinking from gp.waiting. + // They must be cleared before being put back into the sudog cache. + // Clear before unlinking, because if a stack copy happens after the unlink, + // they will not be updated, they will be left pointing to the old stack, + // which creates dangling pointers, which may be detected by the + // garbage collector. 
+ for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { + sg1.selectdone = nil + sg1.elem = nil + } + gp.waiting = nil + for i := int(sel.ncase) - 1; i >= 0; i-- { + k = &scases[pollorder[i]] + if sglist.releasetime > 0 { + k.releasetime = sglist.releasetime + } + if sg == sglist { + cas = k + } else { + c = k._chan + if k.kind == _CaseSend { + c.sendq.dequeueSudoG(sglist) + } else { + c.recvq.dequeueSudoG(sglist) + } + } + sgnext = sglist.waitlink + sglist.waitlink = nil + releaseSudog(sglist) + sglist = sgnext + } + + if cas == nil { + goto loop + } + + c = cas._chan + + if c.dataqsiz > 0 { + gothrow("selectgo: shouldn't happen") + } + + if debugSelect { + print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n") + } + + if cas.kind == _CaseRecv { + if cas.receivedp != nil { + *cas.receivedp = true + } + } + + if raceenabled { + if cas.kind == _CaseRecv && cas.elem != nil { + raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc) + } else if cas.kind == _CaseSend { + raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc) + } + } + + selunlock(sel) + goto retc + +asyncrecv: + // can receive from buffer + if raceenabled { + if cas.elem != nil { + raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc) + } + raceacquire(chanbuf(c, c.recvx)) + racerelease(chanbuf(c, c.recvx)) + } + if cas.receivedp != nil { + *cas.receivedp = true + } + if cas.elem != nil { + memmove(cas.elem, chanbuf(c, c.recvx), uintptr(c.elemsize)) + } + memclr(chanbuf(c, c.recvx), uintptr(c.elemsize)) + c.recvx++ + if c.recvx == c.dataqsiz { + c.recvx = 0 + } + c.qcount-- + sg = c.sendq.dequeue() + if sg != nil { + gp = sg.g + selunlock(sel) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } else { + selunlock(sel) + } + goto retc + +asyncsend: + // can send to buffer + if raceenabled { + raceacquire(chanbuf(c, c.sendx)) + racerelease(chanbuf(c, c.sendx)) + raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc) + } + memmove(chanbuf(c, c.sendx), cas.elem, uintptr(c.elemsize)) + c.sendx++ + if c.sendx == c.dataqsiz { + c.sendx = 0 + } + c.qcount++ + sg = c.recvq.dequeue() + if sg != nil { + gp = sg.g + selunlock(sel) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + } else { + selunlock(sel) + } + goto retc + +syncrecv: + // can receive from sleeping sender (sg) + if raceenabled { + if cas.elem != nil { + raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc) + } + racesync(c, sg) + } + selunlock(sel) + if debugSelect { + print("syncrecv: sel=", sel, " c=", c, "\n") + } + if cas.receivedp != nil { + *cas.receivedp = true + } + if cas.elem != nil { + memmove(cas.elem, sg.elem, uintptr(c.elemsize)) + } + sg.elem = nil + gp = sg.g + gp.param = unsafe.Pointer(sg) + if sg.releasetime != 0 { + sg.releasetime = cputicks() + } + goready(gp) + goto retc + +rclose: + // read at end of closed channel + selunlock(sel) + if cas.receivedp != nil { + *cas.receivedp = false + } + if cas.elem != nil { + memclr(cas.elem, uintptr(c.elemsize)) + } + if raceenabled { + raceacquire(unsafe.Pointer(c)) + } + goto retc + +syncsend: + // can send to sleeping receiver (sg) + if raceenabled { + raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc) + racesync(c, sg) + } + selunlock(sel) + if debugSelect { + print("syncsend: sel=", sel, " c=", c, "\n") + } + if sg.elem != nil { + memmove(sg.elem, cas.elem, uintptr(c.elemsize)) + } + sg.elem = nil + gp = sg.g + gp.param = unsafe.Pointer(sg) + if sg.releasetime != 0 { + sg.releasetime = cputicks() 
+ } + goready(gp) + +retc: + if cas.releasetime > 0 { + blockevent(cas.releasetime-t0, 2) + } + return cas.pc, cas.so + +sclose: + // send on closed channel + selunlock(sel) + panic("send on closed channel") +} + +func (c *hchan) sortkey() uintptr { + // TODO(khr): if we have a moving garbage collector, we'll need to + // change this function. + return uintptr(unsafe.Pointer(c)) +} + +// A runtimeSelect is a single case passed to rselect. +// This must match ../reflect/value.go:/runtimeSelect +type runtimeSelect struct { + dir selectDir + typ unsafe.Pointer // channel type (not used here) + ch *hchan // channel + val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir) +} + +// These values must match ../reflect/value.go:/SelectDir. +type selectDir int + +const ( + _ selectDir = iota + selectSend // case Chan <- Send + selectRecv // case <-Chan: + selectDefault // default +) + +func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) { + // flagNoScan is safe here, because all objects are also referenced from cases. + size := selectsize(uintptr(len(cases))) + sel := (*_select)(mallocgc(size, nil, flagNoScan)) + newselect(sel, int64(size), int32(len(cases))) + r := new(bool) + for i := range cases { + rc := &cases[i] + switch rc.dir { + case selectDefault: + selectdefaultImpl(sel, uintptr(i), 0) + case selectSend: + if rc.ch == nil { + break + } + selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0) + case selectRecv: + if rc.ch == nil { + break + } + selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0) + } + } + + pc, _ := selectgoImpl(sel) + chosen = int(pc) + recvOK = *r + return +} + +func (q *waitq) dequeueSudoG(s *sudog) { + var prevsgp *sudog + l := &q.first + for { + sgp := *l + if sgp == nil { + return + } + if sgp == s { + *l = sgp.next + if q.last == sgp { + q.last = prevsgp + } + s.next = nil + return + } + l = &sgp.next + prevsgp = sgp + } +} diff --git a/libgo/go/runtime/sema.go b/libgo/go/runtime/sema.go new file mode 100644 index 0000000..26dbd30 --- /dev/null +++ b/libgo/go/runtime/sema.go @@ -0,0 +1,275 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Semaphore implementation exposed to Go. +// Intended use is provide a sleep and wakeup +// primitive that can be used in the contended case +// of other synchronization primitives. +// Thus it targets the same goal as Linux's futex, +// but it has much simpler semantics. +// +// That is, don't think of these as semaphores. +// Think of them as a way to implement sleep and wakeup +// such that every sleep is paired with a single wakeup, +// even if, due to races, the wakeup happens before the sleep. +// +// See Mullender and Cox, ``Semaphores in Plan 9,'' +// http://swtch.com/semaphore.pdf + +package runtime + +import "unsafe" + +// Asynchronous semaphore for sync.Mutex. + +type semaRoot struct { + lock mutex + head *sudog + tail *sudog + nwait uint32 // Number of waiters. Read w/o the lock. +} + +// Prime to not correlate with any user patterns. +const semTabSize = 251 + +var semtable [semTabSize]struct { + root semaRoot + pad [_CacheLineSize - unsafe.Sizeof(semaRoot{})]byte +} + +// Called from sync/net packages. +func asyncsemacquire(addr *uint32) { + semacquire(addr, true) +} + +func asyncsemrelease(addr *uint32) { + semrelease(addr) +} + +// Called from runtime. 
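Aside: semroot below maps a semaphore address to one of 251 buckets; the table size is prime and the address is shifted right by 3 first, so heap-aligned addresses spread across buckets instead of correlating with allocation patterns. A sketch of the same mapping (semrootIndex is an invented name):

    package main

    import (
        "fmt"
        "unsafe"
    )

    const semTabSize = 251 // prime: bucket choice doesn't correlate with user patterns

    // semrootIndex is the bucket computation used by semroot above.
    func semrootIndex(addr *uint32) uintptr {
        return (uintptr(unsafe.Pointer(addr)) >> 3) % semTabSize
    }

    func main() {
        vars := make([]uint32, 4)
        for i := range vars {
            fmt.Printf("&vars[%d] -> bucket %d\n", i, semrootIndex(&vars[i]))
        }
    }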
+func semacquire(addr *uint32, profile bool) { + gp := getg() + if gp != gp.m.curg { + gothrow("semacquire not on the G stack") + } + + // Easy case. + if cansemacquire(addr) { + return + } + + // Harder case: + // increment waiter count + // try cansemacquire one more time, return if succeeded + // enqueue itself as a waiter + // sleep + // (waiter descriptor is dequeued by signaler) + s := acquireSudog() + root := semroot(addr) + t0 := int64(0) + s.releasetime = 0 + if profile && blockprofilerate > 0 { + t0 = cputicks() + s.releasetime = -1 + } + for { + lock(&root.lock) + // Add ourselves to nwait to disable "easy case" in semrelease. + xadd(&root.nwait, 1) + // Check cansemacquire to avoid missed wakeup. + if cansemacquire(addr) { + xadd(&root.nwait, -1) + unlock(&root.lock) + break + } + // Any semrelease after the cansemacquire knows we're waiting + // (we set nwait above), so go to sleep. + root.queue(addr, s) + goparkunlock(&root.lock, "semacquire") + if cansemacquire(addr) { + break + } + } + if s.releasetime > 0 { + blockevent(int64(s.releasetime)-t0, 3) + } + releaseSudog(s) +} + +func semrelease(addr *uint32) { + root := semroot(addr) + xadd(addr, 1) + + // Easy case: no waiters? + // This check must happen after the xadd, to avoid a missed wakeup + // (see loop in semacquire). + if atomicload(&root.nwait) == 0 { + return + } + + // Harder case: search for a waiter and wake it. + lock(&root.lock) + if atomicload(&root.nwait) == 0 { + // The count is already consumed by another goroutine, + // so no need to wake up another goroutine. + unlock(&root.lock) + return + } + s := root.head + for ; s != nil; s = s.next { + if s.elem == unsafe.Pointer(addr) { + xadd(&root.nwait, -1) + root.dequeue(s) + break + } + } + unlock(&root.lock) + if s != nil { + if s.releasetime != 0 { + s.releasetime = cputicks() + } + goready(s.g) + } +} + +func semroot(addr *uint32) *semaRoot { + return &semtable[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root +} + +func cansemacquire(addr *uint32) bool { + for { + v := atomicload(addr) + if v == 0 { + return false + } + if cas(addr, v, v-1) { + return true + } + } +} + +func (root *semaRoot) queue(addr *uint32, s *sudog) { + s.g = getg() + s.elem = unsafe.Pointer(addr) + s.next = nil + s.prev = root.tail + if root.tail != nil { + root.tail.next = s + } else { + root.head = s + } + root.tail = s +} + +func (root *semaRoot) dequeue(s *sudog) { + if s.next != nil { + s.next.prev = s.prev + } else { + root.tail = s.prev + } + if s.prev != nil { + s.prev.next = s.next + } else { + root.head = s.next + } + s.elem = nil + s.next = nil + s.prev = nil +} + +// Synchronous semaphore for sync.Cond. +type syncSema struct { + lock mutex + head *sudog + tail *sudog +} + +// Syncsemacquire waits for a pairing syncsemrelease on the same semaphore s. +func syncsemacquire(s *syncSema) { + lock(&s.lock) + if s.head != nil && s.head.nrelease > 0 { + // Have pending release, consume it. + var wake *sudog + s.head.nrelease-- + if s.head.nrelease == 0 { + wake = s.head + s.head = wake.next + if s.head == nil { + s.tail = nil + } + } + unlock(&s.lock) + if wake != nil { + wake.next = nil + goready(wake.g) + } + } else { + // Enqueue itself. 
+ w := acquireSudog() + w.g = getg() + w.nrelease = -1 + w.next = nil + w.releasetime = 0 + t0 := int64(0) + if blockprofilerate > 0 { + t0 = cputicks() + w.releasetime = -1 + } + if s.tail == nil { + s.head = w + } else { + s.tail.next = w + } + s.tail = w + goparkunlock(&s.lock, "semacquire") + if t0 != 0 { + blockevent(int64(w.releasetime)-t0, 2) + } + releaseSudog(w) + } +} + +// Syncsemrelease waits for n pairing syncsemacquire on the same semaphore s. +func syncsemrelease(s *syncSema, n uint32) { + lock(&s.lock) + for n > 0 && s.head != nil && s.head.nrelease < 0 { + // Have pending acquire, satisfy it. + wake := s.head + s.head = wake.next + if s.head == nil { + s.tail = nil + } + if wake.releasetime != 0 { + wake.releasetime = cputicks() + } + wake.next = nil + goready(wake.g) + n-- + } + if n > 0 { + // enqueue itself + w := acquireSudog() + w.g = getg() + w.nrelease = int32(n) + w.next = nil + w.releasetime = 0 + if s.tail == nil { + s.head = w + } else { + s.tail.next = w + } + s.tail = w + goparkunlock(&s.lock, "semarelease") + releaseSudog(w) + } else { + unlock(&s.lock) + } +} + +func syncsemcheck(sz uintptr) { + if sz != unsafe.Sizeof(syncSema{}) { + print("runtime: bad syncSema size - sync=", sz, " runtime=", unsafe.Sizeof(syncSema{}), "\n") + gothrow("bad syncSema size") + } +} diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go new file mode 100644 index 0000000..ba77b6e --- /dev/null +++ b/libgo/go/runtime/signal_unix.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package runtime + +func sigpipe() + +func os_sigpipe() { + onM(sigpipe) +} diff --git a/libgo/go/runtime/sigpanic_unix.go b/libgo/go/runtime/sigpanic_unix.go new file mode 100644 index 0000000..6807985 --- /dev/null +++ b/libgo/go/runtime/sigpanic_unix.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package runtime + +func signame(int32) *byte + +func sigpanic() { + g := getg() + if !canpanic(g) { + gothrow("unexpected signal during runtime execution") + } + + switch g.sig { + case _SIGBUS: + if g.sigcode0 == _BUS_ADRERR && g.sigcode1 < 0x1000 || g.paniconfault { + panicmem() + } + print("unexpected fault address ", hex(g.sigcode1), "\n") + gothrow("fault") + case _SIGSEGV: + if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 || g.paniconfault { + panicmem() + } + print("unexpected fault address ", hex(g.sigcode1), "\n") + gothrow("fault") + case _SIGFPE: + switch g.sigcode0 { + case _FPE_INTDIV: + panicdivide() + case _FPE_INTOVF: + panicoverflow() + } + panicfloat() + } + panic(errorString(gostringnocopy(signame(g.sig)))) +} diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go new file mode 100644 index 0000000..2d9c24d --- /dev/null +++ b/libgo/go/runtime/sigqueue.go @@ -0,0 +1,173 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements runtime support for signal handling. 
+// +// Most synchronization primitives are not available from +// the signal handler (it cannot block, allocate memory, or use locks) +// so the handler communicates with a processing goroutine +// via struct sig, below. +// +// sigsend is called by the signal handler to queue a new signal. +// signal_recv is called by the Go program to receive a newly queued signal. +// Synchronization between sigsend and signal_recv is based on the sig.state +// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending. +// sigReceiving means that signal_recv is blocked on sig.Note and there are no +// new pending signals. +// sigSending means that sig.mask *may* contain new pending signals, +// signal_recv can't be blocked in this state. +// sigIdle means that there are no new pending signals and signal_recv is not blocked. +// Transitions between states are done atomically with CAS. +// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask. +// If several sigsends and signal_recv execute concurrently, it can lead to +// unnecessary rechecks of sig.mask, but it cannot lead to missed signals +// nor deadlocks. + +package runtime + +import "unsafe" + +var sig struct { + note note + mask [(_NSIG + 31) / 32]uint32 + wanted [(_NSIG + 31) / 32]uint32 + recv [(_NSIG + 31) / 32]uint32 + state uint32 + inuse bool +} + +const ( + sigIdle = iota + sigReceiving + sigSending +) + +// Called from sighandler to send a signal back out of the signal handling thread. +// Reports whether the signal was sent. If not, the caller typically crashes the program. +func sigsend(s int32) bool { + bit := uint32(1) << uint(s&31) + if !sig.inuse || s < 0 || int(s) >= 32*len(sig.wanted) || sig.wanted[s/32]&bit == 0 { + return false + } + + // Add signal to outgoing queue. + for { + mask := sig.mask[s/32] + if mask&bit != 0 { + return true // signal already in queue + } + if cas(&sig.mask[s/32], mask, mask|bit) { + break + } + } + + // Notify receiver that queue has new bit. +Send: + for { + switch atomicload(&sig.state) { + default: + gothrow("sigsend: inconsistent state") + case sigIdle: + if cas(&sig.state, sigIdle, sigSending) { + break Send + } + case sigSending: + // notification already pending + break Send + case sigReceiving: + if cas(&sig.state, sigReceiving, sigIdle) { + notewakeup(&sig.note) + break Send + } + } + } + + return true +} + +// Called to receive the next queued signal. +// Must only be called from a single goroutine at a time. +func signal_recv() uint32 { + for { + // Serve any signals from local copy. + for i := uint32(0); i < _NSIG; i++ { + if sig.recv[i/32]&(1<<(i&31)) != 0 { + sig.recv[i/32] &^= 1 << (i & 31) + return i + } + } + + // Wait for updates to be available from signal sender. + Receive: + for { + switch atomicload(&sig.state) { + default: + gothrow("signal_recv: inconsistent state") + case sigIdle: + if cas(&sig.state, sigIdle, sigReceiving) { + notetsleepg(&sig.note, -1) + noteclear(&sig.note) + break Receive + } + case sigSending: + if cas(&sig.state, sigSending, sigIdle) { + break Receive + } + } + } + + // Incorporate updates from sender into local copy. + for i := range sig.mask { + sig.recv[i] = xchg(&sig.mask[i], 0) + } + } +} + +// Must only be called from a single goroutine at a time. +func signal_enable(s uint32) { + if !sig.inuse { + // The first call to signal_enable is for us + // to use for initialization. It does not pass + // signal information in m. 
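The sigIdle/sigReceiving/sigSending handshake described above can be reproduced with sync/atomic; the sketch below (send, recv, and wake are invented names) drives the same CAS transitions, with a buffered channel standing in for the note sleep/wakeup:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    const (
        sigIdle = iota
        sigReceiving
        sigSending
    )

    func main() {
        var state uint32
        wake := make(chan struct{}, 1) // stands in for notewakeup/notetsleepg

        send := func() {
            for {
                switch atomic.LoadUint32(&state) {
                case sigIdle:
                    if atomic.CompareAndSwapUint32(&state, sigIdle, sigSending) {
                        return
                    }
                case sigSending:
                    return // notification already pending
                case sigReceiving:
                    if atomic.CompareAndSwapUint32(&state, sigReceiving, sigIdle) {
                        wake <- struct{}{} // unblock the parked receiver
                        return
                    }
                }
            }
        }

        recv := func() {
            for {
                switch atomic.LoadUint32(&state) {
                case sigIdle:
                    if atomic.CompareAndSwapUint32(&state, sigIdle, sigReceiving) {
                        <-wake // park until a sender wakes us
                        return
                    }
                case sigSending:
                    if atomic.CompareAndSwapUint32(&state, sigSending, sigIdle) {
                        return // consume the pending notification
                    }
                }
            }
        }

        go send()
        recv()
        fmt.Println("received")
    }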
+ sig.inuse = true // enable reception of signals; cannot disable + noteclear(&sig.note) + return + } + + if int(s) >= len(sig.wanted)*32 { + return + } + sig.wanted[s/32] |= 1 << (s & 31) + sigenable_go(s) +} + +// Must only be called from a single goroutine at a time. +func signal_disable(s uint32) { + if int(s) >= len(sig.wanted)*32 { + return + } + sig.wanted[s/32] &^= 1 << (s & 31) + sigdisable_go(s) +} + +// This runs on a foreign stack, without an m or a g. No stack split. +//go:nosplit +func badsignal(sig uintptr) { + cgocallback(unsafe.Pointer(funcPC(sigsend)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig)) +} + +func sigenable_m() +func sigdisable_m() + +func sigenable_go(s uint32) { + g := getg() + g.m.scalararg[0] = uintptr(s) + onM(sigenable_m) +} + +func sigdisable_go(s uint32) { + g := getg() + g.m.scalararg[0] = uintptr(s) + onM(sigdisable_m) +} diff --git a/libgo/go/runtime/slice.go b/libgo/go/runtime/slice.go new file mode 100644 index 0000000..171087d --- /dev/null +++ b/libgo/go/runtime/slice.go @@ -0,0 +1,139 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +type sliceStruct struct { + array unsafe.Pointer + len int + cap int +} + +// TODO: take uintptrs instead of int64s? +func makeslice(t *slicetype, len64 int64, cap64 int64) sliceStruct { + // NOTE: The len > MaxMem/elemsize check here is not strictly necessary, + // but it produces a 'len out of range' error instead of a 'cap out of range' error + // when someone does make([]T, bignumber). 'cap out of range' is true too, + // but since the cap is only being supplied implicitly, saying len is clearer. + // See issue 4085. + len := int(len64) + if len64 < 0 || int64(len) != len64 || t.elem.size > 0 && uintptr(len) > maxmem/uintptr(t.elem.size) { + panic(errorString("makeslice: len out of range")) + } + cap := int(cap64) + if cap < len || int64(cap) != cap64 || t.elem.size > 0 && uintptr(cap) > maxmem/uintptr(t.elem.size) { + panic(errorString("makeslice: cap out of range")) + } + p := newarray(t.elem, uintptr(cap)) + return sliceStruct{p, len, cap} +} + +// TODO: take uintptr instead of int64? 
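Aside: growslice just below implements append's growth policy — jump straight to the requested capacity if it is more than double the old one, otherwise double while the length is under 1024 and grow by a quarter per step after that, before rounding up to an allocator size class. The policy in isolation (nextCap is an invented name; the goroundupsize rounding is omitted):

    package main

    import "fmt"

    func nextCap(oldLen, oldCap, needed int) int {
        newcap := oldCap
        if newcap+newcap < needed {
            return needed // big jump requested: go straight there
        }
        for newcap < needed {
            if oldLen < 1024 {
                newcap += newcap // small slices: double
            } else {
                newcap += newcap / 4 // large slices: +25% per step
            }
        }
        return newcap
    }

    func main() {
        fmt.Println(nextCap(4, 4, 5))          // 8: doubling regime
        fmt.Println(nextCap(2000, 2000, 2001)) // 2500: +25% regime
    }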
+func growslice(t *slicetype, old sliceStruct, n int64) sliceStruct {
+ if n < 1 {
+ panic(errorString("growslice: invalid n"))
+ }
+
+ cap64 := int64(old.cap) + n
+ cap := int(cap64)
+
+ if int64(cap) != cap64 || cap < old.cap || t.elem.size > 0 && uintptr(cap) > maxmem/uintptr(t.elem.size) {
+ panic(errorString("growslice: cap out of range"))
+ }
+
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer(&t))
+ racereadrangepc(old.array, uintptr(old.len*int(t.elem.size)), callerpc, funcPC(growslice))
+ }
+
+ et := t.elem
+ if et.size == 0 {
+ return sliceStruct{old.array, old.len, cap}
+ }
+
+ newcap := old.cap
+ if newcap+newcap < cap {
+ newcap = cap
+ } else {
+ for {
+ if old.len < 1024 {
+ newcap += newcap
+ } else {
+ newcap += newcap / 4
+ }
+ if newcap >= cap {
+ break
+ }
+ }
+ }
+
+ if uintptr(newcap) >= maxmem/uintptr(et.size) {
+ panic(errorString("growslice: cap out of range"))
+ }
+ lenmem := uintptr(old.len) * uintptr(et.size)
+ capmem := goroundupsize(uintptr(newcap) * uintptr(et.size))
+ newcap = int(capmem / uintptr(et.size))
+ var p unsafe.Pointer
+ if et.kind&kindNoPointers != 0 {
+ p = rawmem(capmem)
+ memclr(add(p, lenmem), capmem-lenmem)
+ } else {
+ // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory
+ p = newarray(et, uintptr(newcap))
+ }
+ memmove(p, old.array, lenmem)
+
+ return sliceStruct{p, old.len, newcap}
+}
+
+func slicecopy(to sliceStruct, fm sliceStruct, width uintptr) int {
+ if fm.len == 0 || to.len == 0 || width == 0 {
+ return 0
+ }
+
+ n := fm.len
+ if to.len < n {
+ n = to.len
+ }
+
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer(&to))
+ pc := funcPC(slicecopy)
+ racewriterangepc(to.array, uintptr(n*int(width)), callerpc, pc)
+ racereadrangepc(fm.array, uintptr(n*int(width)), callerpc, pc)
+ }
+
+ size := uintptr(n) * width
+ if size == 1 { // common case worth about 2x to do here
+ // TODO: is this still worth it with new memmove impl?
+ *(*byte)(to.array) = *(*byte)(fm.array) // known to be a byte pointer
+ } else {
+ memmove(to.array, fm.array, size)
+ }
+ return int(n)
+}
+
+func slicestringcopy(to []byte, fm string) int {
+ if len(fm) == 0 || len(to) == 0 {
+ return 0
+ }
+
+ n := len(fm)
+ if len(to) < n {
+ n = len(to)
+ }
+
+ if raceenabled {
+ callerpc := getcallerpc(unsafe.Pointer(&to))
+ pc := funcPC(slicestringcopy)
+ racewriterangepc(unsafe.Pointer(&to[0]), uintptr(n), callerpc, pc)
+ }
+
+ memmove(unsafe.Pointer(&to[0]), unsafe.Pointer((*stringStruct)(unsafe.Pointer(&fm)).str), uintptr(n))
+ return n
+}
diff --git a/libgo/go/runtime/stack.go b/libgo/go/runtime/stack.go new file mode 100644 index 0000000..f1b7d32 --- /dev/null +++ b/libgo/go/runtime/stack.go @@ -0,0 +1,13 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ // Goroutine preemption request.
+ // Stored into g->stackguard0 to cause split stack check failure.
+ // Must be greater than any real sp.
+ // 0xfffffade in hex.
+ stackPreempt = ^uintptr(1313)
+)
diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go new file mode 100644 index 0000000..0809f89 --- /dev/null +++ b/libgo/go/runtime/string.go @@ -0,0 +1,298 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
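+
+// An illustrative aside on the concat helpers in this file, assuming the
+// gc compiler's lowering (gccgo may lower string concatenation
+// differently): a short chain of additions becomes a fixed-arity call,
+// and only chains of more than five operands use the slice form.
+//
+//    s := a + b + c             // concatstring3([3]string{a, b, c})
+//    s := a + b + c + d + e + f // concatstrings([]string{a, b, c, d, e, f})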
+ +package runtime + +import ( + "unsafe" +) + +func concatstrings(a []string) string { + idx := 0 + l := 0 + count := 0 + for i, x := range a { + n := len(x) + if n == 0 { + continue + } + if l+n < l { + gothrow("string concatenation too long") + } + l += n + count++ + idx = i + } + if count == 0 { + return "" + } + if count == 1 { + return a[idx] + } + s, b := rawstring(l) + l = 0 + for _, x := range a { + copy(b[l:], x) + l += len(x) + } + return s +} + +//go:nosplit +func concatstring2(a [2]string) string { + return concatstrings(a[:]) +} + +//go:nosplit +func concatstring3(a [3]string) string { + return concatstrings(a[:]) +} + +//go:nosplit +func concatstring4(a [4]string) string { + return concatstrings(a[:]) +} + +//go:nosplit +func concatstring5(a [5]string) string { + return concatstrings(a[:]) +} + +func slicebytetostring(b []byte) string { + if raceenabled && len(b) > 0 { + racereadrangepc(unsafe.Pointer(&b[0]), + uintptr(len(b)), + getcallerpc(unsafe.Pointer(&b)), + funcPC(slicebytetostring)) + } + s, c := rawstring(len(b)) + copy(c, b) + return s +} + +func slicebytetostringtmp(b []byte) string { + // Return a "string" referring to the actual []byte bytes. + // This is only for use by internal compiler optimizations + // that know that the string form will be discarded before + // the calling goroutine could possibly modify the original + // slice or synchronize with another goroutine. + // Today, the only such case is a m[string(k)] lookup where + // m is a string-keyed map and k is a []byte. + + if raceenabled && len(b) > 0 { + racereadrangepc(unsafe.Pointer(&b[0]), + uintptr(len(b)), + getcallerpc(unsafe.Pointer(&b)), + funcPC(slicebytetostringtmp)) + } + return *(*string)(unsafe.Pointer(&b)) +} + +func stringtoslicebyte(s string) []byte { + b := rawbyteslice(len(s)) + copy(b, s) + return b +} + +func stringtoslicerune(s string) []rune { + // two passes. + // unlike slicerunetostring, no race because strings are immutable. + n := 0 + t := s + for len(s) > 0 { + _, k := charntorune(s) + s = s[k:] + n++ + } + a := rawruneslice(n) + n = 0 + for len(t) > 0 { + r, k := charntorune(t) + t = t[k:] + a[n] = r + n++ + } + return a +} + +func slicerunetostring(a []rune) string { + if raceenabled && len(a) > 0 { + racereadrangepc(unsafe.Pointer(&a[0]), + uintptr(len(a))*unsafe.Sizeof(a[0]), + getcallerpc(unsafe.Pointer(&a)), + funcPC(slicerunetostring)) + } + var dum [4]byte + size1 := 0 + for _, r := range a { + size1 += runetochar(dum[:], r) + } + s, b := rawstring(size1 + 3) + size2 := 0 + for _, r := range a { + // check for race + if size2 >= size1 { + break + } + size2 += runetochar(b[size2:], r) + } + return s[:size2] +} + +type stringStruct struct { + str unsafe.Pointer + len int +} + +func intstring(v int64) string { + s, b := rawstring(4) + n := runetochar(b, rune(v)) + return s[:n] +} + +// stringiter returns the index of the next +// rune after the rune that starts at s[k]. +func stringiter(s string, k int) int { + if k >= len(s) { + // 0 is end of iteration + return 0 + } + + c := s[k] + if c < runeself { + return k + 1 + } + + // multi-char rune + _, n := charntorune(s[k:]) + return k + n +} + +// stringiter2 returns the rune that starts at s[k] +// and the index where the next rune starts. 
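+//
+// An illustrative sketch of the intended use (the compiler is free to
+// lower differently): a statement such as
+//
+//    for i, r := range s { use(i, r) }
+//
+// can be compiled as
+//
+//    k := 0
+//    for {
+//        i := k
+//        next, r := stringiter2(s, k)
+//        if next == 0 {
++//            break // end of iteration
+//        }
+//        k = next
+//        use(i, r)
+//    }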
+func stringiter2(s string, k int) (int, rune) { + if k >= len(s) { + // 0 is end of iteration + return 0, 0 + } + + c := s[k] + if c < runeself { + return k + 1, rune(c) + } + + // multi-char rune + r, n := charntorune(s[k:]) + return k + n, r +} + +// rawstring allocates storage for a new string. The returned +// string and byte slice both refer to the same storage. +// The storage is not zeroed. Callers should use +// b to set the string contents and then drop b. +func rawstring(size int) (s string, b []byte) { + p := mallocgc(uintptr(size), nil, flagNoScan|flagNoZero) + + (*stringStruct)(unsafe.Pointer(&s)).str = p + (*stringStruct)(unsafe.Pointer(&s)).len = size + + (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p) + (*slice)(unsafe.Pointer(&b)).len = uint(size) + (*slice)(unsafe.Pointer(&b)).cap = uint(size) + + for { + ms := maxstring + if uintptr(size) <= uintptr(ms) || casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) { + return + } + } +} + +// rawbyteslice allocates a new byte slice. The byte slice is not zeroed. +func rawbyteslice(size int) (b []byte) { + cap := goroundupsize(uintptr(size)) + p := mallocgc(cap, nil, flagNoScan|flagNoZero) + if cap != uintptr(size) { + memclr(add(p, uintptr(size)), cap-uintptr(size)) + } + + (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p) + (*slice)(unsafe.Pointer(&b)).len = uint(size) + (*slice)(unsafe.Pointer(&b)).cap = uint(cap) + return +} + +// rawruneslice allocates a new rune slice. The rune slice is not zeroed. +func rawruneslice(size int) (b []rune) { + if uintptr(size) > maxmem/4 { + gothrow("out of memory") + } + mem := goroundupsize(uintptr(size) * 4) + p := mallocgc(mem, nil, flagNoScan|flagNoZero) + if mem != uintptr(size)*4 { + memclr(add(p, uintptr(size)*4), mem-uintptr(size)*4) + } + + (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p) + (*slice)(unsafe.Pointer(&b)).len = uint(size) + (*slice)(unsafe.Pointer(&b)).cap = uint(mem / 4) + return +} + +// used by cmd/cgo +func gobytes(p *byte, n int) []byte { + if n == 0 { + return make([]byte, 0) + } + x := make([]byte, n) + memmove(unsafe.Pointer(&x[0]), unsafe.Pointer(p), uintptr(n)) + return x +} + +func gostringsize(n int) string { + s, _ := rawstring(n) + return s +} + +//go:noescape +func findnull(*byte) int + +func gostring(p *byte) string { + l := findnull(p) + if l == 0 { + return "" + } + s, b := rawstring(l) + memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l)) + return s +} + +func gostringn(p *byte, l int) string { + if l == 0 { + return "" + } + s, b := rawstring(l) + memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l)) + return s +} + +func index(s, t string) int { + if len(t) == 0 { + return 0 + } + for i := 0; i < len(s); i++ { + if s[i] == t[0] && hasprefix(s[i:], t) { + return i + } + } + return -1 +} + +func contains(s, t string) bool { + return index(s, t) >= 0 +} + +func hasprefix(s, t string) bool { + return len(s) >= len(t) && s[:len(t)] == t +} diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go new file mode 100644 index 0000000..fe8f9c9 --- /dev/null +++ b/libgo/go/runtime/stubs.go @@ -0,0 +1,316 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// Declarations for runtime services implemented in C or assembly. 
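+//
+// A worked note on the two size constants just below: ^uintptr(0) (and
+// likewise ^uintreg(0)) is all one bits, so shifting it right by 63 yields
+// 1 on a 64-bit platform and 0 on a 32-bit one. Hence:
+//
+//    4 << 1 == 8 // 64-bit: ptrSize is 8
+//    4 << 0 == 4 // 32-bit: ptrSize is 4
+//
+// Unlike unsafe.Sizeof, whose result is a typed uintptr, the shift form
+// stays an ideal (untyped) constant.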
+ +const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const +const regSize = 4 << (^uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) but an ideal const + +// Should be a built-in for unsafe.Pointer? +//go:nosplit +func add(p unsafe.Pointer, x uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(p) + x) +} + +// n must be a power of 2 +func roundup(p unsafe.Pointer, n uintptr) unsafe.Pointer { + delta := -uintptr(p) & (n - 1) + return unsafe.Pointer(uintptr(p) + delta) +} + +// in runtime.c +func getg() *g +func acquirem() *m +func releasem(mp *m) +func gomcache() *mcache +func readgstatus(*g) uint32 // proc.c + +// mcall switches from the g to the g0 stack and invokes fn(g), +// where g is the goroutine that made the call. +// mcall saves g's current PC/SP in g->sched so that it can be restored later. +// It is up to fn to arrange for that later execution, typically by recording +// g in a data structure, causing something to call ready(g) later. +// mcall returns to the original goroutine g later, when g has been rescheduled. +// fn must not return at all; typically it ends by calling schedule, to let the m +// run other goroutines. +// +// mcall can only be called from g stacks (not g0, not gsignal). +//go:noescape +func mcall(fn func(*g)) + +// onM switches from the g to the g0 stack and invokes fn(). +// When fn returns, onM switches back to the g and returns, +// continuing execution on the g stack. +// If arguments must be passed to fn, they can be written to +// g->m->ptrarg (pointers) and g->m->scalararg (non-pointers) +// before the call and then consulted during fn. +// Similarly, fn can pass return values back in those locations. +// If fn is written in Go, it can be a closure, which avoids the need for +// ptrarg and scalararg entirely. +// After reading values out of ptrarg and scalararg it is conventional +// to zero them to avoid (memory or information) leaks. +// +// If onM is called from a g0 stack, it invokes fn and returns, +// without any stack switches. +// +// If onM is called from a gsignal stack, it crashes the program. +// The implication is that functions used in signal handlers must +// not use onM. +// +// NOTE(rsc): We could introduce a separate onMsignal that is +// like onM but if called from a gsignal stack would just run fn on +// that stack. The caller of onMsignal would be required to save the +// old values of ptrarg/scalararg and restore them when the call +// was finished, in case the signal interrupted an onM sequence +// in progress on the g or g0 stacks. Until there is a clear need for this, +// we just reject onM in signal handling contexts entirely. +// +//go:noescape +func onM(fn func()) + +// onMsignal is like onM but is allowed to be used in code that +// might run on the gsignal stack. Code running on a signal stack +// may be interrupting an onM sequence on the main stack, so +// if the onMsignal calling sequence writes to ptrarg/scalararg, +// it must first save the old values and then restore them when +// finished. As an exception to the rule, it is fine not to save and +// restore the values if the program is trying to crash rather than +// return from the signal handler. +// Once all the runtime is written in Go, there will be no ptrarg/scalararg +// and the distinction between onM and onMsignal (and perhaps mcall) +// can go away. +// +// If onMsignal is called from a gsignal stack, it invokes fn directly, +// without a stack switch. Otherwise onMsignal behaves like onM. 
+// +//go:noescape +func onM_signalok(fn func()) + +func badonm() { + gothrow("onM called from signal goroutine") +} + +// C functions that run on the M stack. +// Call using mcall. +func gosched_m(*g) +func park_m(*g) +func recovery_m(*g) + +// More C functions that run on the M stack. +// Call using onM. +func mcacheRefill_m() +func largeAlloc_m() +func gc_m() +func scavenge_m() +func setFinalizer_m() +func removeFinalizer_m() +func markallocated_m() +func unrollgcprog_m() +func unrollgcproginplace_m() +func setgcpercent_m() +func setmaxthreads_m() +func ready_m() +func deferproc_m() +func goexit_m() +func startpanic_m() +func dopanic_m() +func readmemstats_m() +func writeheapdump_m() + +// memclr clears n bytes starting at ptr. +// in memclr_*.s +//go:noescape +func memclr(ptr unsafe.Pointer, n uintptr) + +// memmove copies n bytes from "from" to "to". +// in memmove_*.s +//go:noescape +func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr) + +func starttheworld() +func stoptheworld() +func newextram() +func lockOSThread() +func unlockOSThread() + +// exported value for testing +var hashLoad = loadFactor + +// in asm_*.s +func fastrand1() uint32 + +// in asm_*.s +//go:noescape +func memeq(a, b unsafe.Pointer, size uintptr) bool + +// noescape hides a pointer from escape analysis. noescape is +// the identity function but escape analysis doesn't think the +// output depends on the input. noescape is inlined and currently +// compiles down to a single xor instruction. +// USE CAREFULLY! +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func entersyscall() +func reentersyscall(pc uintptr, sp unsafe.Pointer) +func entersyscallblock() +func exitsyscall() + +func cgocallback(fn, frame unsafe.Pointer, framesize uintptr) +func gogo(buf *gobuf) +func gosave(buf *gobuf) +func read(fd int32, p unsafe.Pointer, n int32) int32 +func close(fd int32) int32 +func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32 + +//go:noescape +func jmpdefer(fv *funcval, argp uintptr) +func exit1(code int32) +func asminit() +func setg(gg *g) +func exit(code int32) +func breakpoint() +func nanotime() int64 +func usleep(usec uint32) + +// careful: cputicks is not guaranteed to be monotonic! In particular, we have +// noticed drift between cpus on certain os/arch combinations. See issue 8976. +func cputicks() int64 + +func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer +func munmap(addr unsafe.Pointer, n uintptr) +func madvise(addr unsafe.Pointer, n uintptr, flags int32) +func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32) +func osyield() +func procyield(cycles uint32) +func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr) +func readgogc() int32 +func purgecachedstats(c *mcache) +func gostringnocopy(b *byte) string +func goexit() + +//go:noescape +func write(fd uintptr, p unsafe.Pointer, n int32) int32 + +//go:noescape +func cas(ptr *uint32, old, new uint32) bool + +//go:noescape +func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool + +//go:noescape +func casuintptr(ptr *uintptr, old, new uintptr) bool + +//go:noescape +func atomicstoreuintptr(ptr *uintptr, new uintptr) + +//go:noescape +func atomicloaduintptr(ptr *uintptr) uintptr + +//go:noescape +func atomicloaduint(ptr *uint) uint + +//go:noescape +func setcallerpc(argp unsafe.Pointer, pc uintptr) + +// getcallerpc returns the program counter (PC) of its caller's caller. 
+// getcallersp returns the stack pointer (SP) of its caller's caller.
+// For both, the argp must be a pointer to the caller's first function argument.
+// The implementation may or may not use argp, depending on
+// the architecture.
+//
+// For example:
+//
+// func f(arg1, arg2, arg3 int) {
+// pc := getcallerpc(unsafe.Pointer(&arg1))
+// sp := getcallersp(unsafe.Pointer(&arg1))
+// }
+//
+// These two lines find the PC and SP immediately following
+// the call to f (where f will return).
+//
+// The call to getcallerpc and getcallersp must be done in the
+// frame being asked about. It would not be correct for f to pass &arg1
+// to another function g and let g call getcallerpc/getcallersp.
+// The call inside g might return information about g's caller or
+// information about f's caller or complete garbage.
+//
+// The result of getcallersp is correct at the time of the return,
+// but it may be invalidated by any subsequent call to a function
+// that might relocate the stack in order to grow or shrink it.
+// A general rule is that the result of getcallersp should be used
+// immediately and can only be passed to nosplit functions.
+
+//go:noescape
+func getcallerpc(argp unsafe.Pointer) uintptr
+
+//go:noescape
+func getcallersp(argp unsafe.Pointer) uintptr
+
+//go:noescape
+func asmcgocall(fn, arg unsafe.Pointer)
+
+//go:noescape
+func asmcgocall_errno(fn, arg unsafe.Pointer) int32
+
+//go:noescape
+func open(name *byte, mode, perm int32) int32
+
+//go:noescape
+func gotraceback(*bool) int32
+
+const _NoArgs = ^uintptr(0)
+
+func newstack()
+func newproc()
+func morestack()
+func mstart()
+func rt0_go()
+
+// return0 is a stub used to return 0 from deferproc.
+// It is called at the very end of deferproc to signal
+// the calling Go function that it should not jump
+// to deferreturn.
+// in asm_*.s
+func return0()
+
+// thunk to call time.now.
+func timenow() (sec int64, nsec int32)
+
+// in asm_*.s
+// not called directly; definitions here supply type information for traceback.
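+//
+// A sketch of the dispatch, not the literal implementation: reflectcall
+// selects the variant whose size is the smallest power of two that fits
+// the argument block, along the lines of
+//
+//    switch {
+//    case n <= 16:
+//        call16(fn, arg, n, retoffset)
+//    case n <= 32:
+//        call32(fn, arg, n, retoffset)
+//    // ... and so on, up to 1 << 30
+//    }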
+func call16(fn, arg unsafe.Pointer, n, retoffset uint32) +func call32(fn, arg unsafe.Pointer, n, retoffset uint32) +func call64(fn, arg unsafe.Pointer, n, retoffset uint32) +func call128(fn, arg unsafe.Pointer, n, retoffset uint32) +func call256(fn, arg unsafe.Pointer, n, retoffset uint32) +func call512(fn, arg unsafe.Pointer, n, retoffset uint32) +func call1024(fn, arg unsafe.Pointer, n, retoffset uint32) +func call2048(fn, arg unsafe.Pointer, n, retoffset uint32) +func call4096(fn, arg unsafe.Pointer, n, retoffset uint32) +func call8192(fn, arg unsafe.Pointer, n, retoffset uint32) +func call16384(fn, arg unsafe.Pointer, n, retoffset uint32) +func call32768(fn, arg unsafe.Pointer, n, retoffset uint32) +func call65536(fn, arg unsafe.Pointer, n, retoffset uint32) +func call131072(fn, arg unsafe.Pointer, n, retoffset uint32) +func call262144(fn, arg unsafe.Pointer, n, retoffset uint32) +func call524288(fn, arg unsafe.Pointer, n, retoffset uint32) +func call1048576(fn, arg unsafe.Pointer, n, retoffset uint32) +func call2097152(fn, arg unsafe.Pointer, n, retoffset uint32) +func call4194304(fn, arg unsafe.Pointer, n, retoffset uint32) +func call8388608(fn, arg unsafe.Pointer, n, retoffset uint32) +func call16777216(fn, arg unsafe.Pointer, n, retoffset uint32) +func call33554432(fn, arg unsafe.Pointer, n, retoffset uint32) +func call67108864(fn, arg unsafe.Pointer, n, retoffset uint32) +func call134217728(fn, arg unsafe.Pointer, n, retoffset uint32) +func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32) +func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32) +func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32) diff --git a/libgo/go/runtime/syscall_windows.go b/libgo/go/runtime/syscall_windows.go new file mode 100644 index 0000000..efbcab5 --- /dev/null +++ b/libgo/go/runtime/syscall_windows.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import ( + "unsafe" +) + +type callbacks struct { + lock mutex + ctxt [cb_max]*wincallbackcontext + n int +} + +func (c *wincallbackcontext) isCleanstack() bool { + return c.cleanstack +} + +func (c *wincallbackcontext) setCleanstack(cleanstack bool) { + c.cleanstack = cleanstack +} + +var ( + cbs callbacks + cbctxts **wincallbackcontext = &cbs.ctxt[0] // to simplify access to cbs.ctxt in sys_windows_*.s + + callbackasm byte // type isn't really byte, it's code in runtime +) + +// callbackasmAddr returns address of runtime.callbackasm +// function adjusted by i. +// runtime.callbackasm is just a series of CALL instructions +// (each is 5 bytes long), and we want callback to arrive at +// correspondent call instruction instead of start of +// runtime.callbackasm. 
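+//
+// For example, given the 5-byte CALL encoding described above:
+//
+//    callbackasmAddr(0) // address of the first CALL instruction
+//    callbackasmAddr(3) // that address plus 15, i.e. the fourth CALL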
+func callbackasmAddr(i int) uintptr { + return uintptr(add(unsafe.Pointer(&callbackasm), uintptr(i*5))) +} + +func compileCallback(fn eface, cleanstack bool) (code uintptr) { + if fn._type == nil || (fn._type.kind&kindMask) != kindFunc { + panic("compilecallback: not a function") + } + ft := (*functype)(unsafe.Pointer(fn._type)) + if len(ft.out) != 1 { + panic("compilecallback: function must have one output parameter") + } + uintptrSize := unsafe.Sizeof(uintptr(0)) + if t := (**_type)(unsafe.Pointer(&ft.out[0])); (*t).size != uintptrSize { + panic("compilecallback: output parameter size is wrong") + } + argsize := uintptr(0) + for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] { + if (*t).size > uintptrSize { + panic("compilecallback: input parameter size is wrong") + } + argsize += uintptrSize + } + + lock(&cbs.lock) + defer unlock(&cbs.lock) + + n := cbs.n + for i := 0; i < n; i++ { + if cbs.ctxt[i].gobody == fn.data && cbs.ctxt[i].isCleanstack() == cleanstack { + return callbackasmAddr(i) + } + } + if n >= cb_max { + gothrow("too many callback functions") + } + + c := new(wincallbackcontext) + c.gobody = fn.data + c.argsize = argsize + c.setCleanstack(cleanstack) + if cleanstack && argsize != 0 { + c.restorestack = argsize + } else { + c.restorestack = 0 + } + cbs.ctxt[n] = c + cbs.n++ + + return callbackasmAddr(n) +} + +func getLoadLibrary() uintptr + +//go:nosplit +func syscall_loadlibrary(filename *uint16) (handle, err uintptr) { + var c libcall + c.fn = getLoadLibrary() + c.n = 1 + c.args = uintptr(unsafe.Pointer(&filename)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + handle = c.r1 + if handle == 0 { + err = c.err + } + return +} + +func getGetProcAddress() uintptr + +//go:nosplit +func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) { + var c libcall + c.fn = getGetProcAddress() + c.n = 2 + c.args = uintptr(unsafe.Pointer(&handle)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + outhandle = c.r1 + if outhandle == 0 { + err = c.err + } + return +} + +//go:nosplit +func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} + +//go:nosplit +func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} + +//go:nosplit +func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} + +//go:nosplit +func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c)) + return c.r1, c.r2, c.err +} + +//go:nosplit +func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { + var c libcall + c.fn = fn + c.n = nargs + c.args = uintptr(unsafe.Pointer(&a1)) + 
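// (All fifteen register-sized arguments a1 through a15 sit contiguously
+ // in the caller's frame, so &a1 is in effect a pointer to an array of
+ // c.n uintptrs; asmstdcall reads c.n of them. The same convention is
+ // used by the smaller Syscall variants above.)
+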
cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c))
+ return c.r1, c.r2, c.err
+}
diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go new file mode 100644 index 0000000..11862c7 --- /dev/null +++ b/libgo/go/runtime/time.go @@ -0,0 +1,289 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Time-related runtime and pieces of package time.
+
+package runtime
+
+import "unsafe"
+
+// Package time knows the layout of this structure.
+// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
+// For GOOS=nacl, package syscall knows the layout of this structure.
+// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
+type timer struct {
+ i int // heap index
+
+ // Timer wakes up at when, and then at when+period, ... (period > 0 only)
+ // each time calling f(now, arg) in the timer goroutine, so f must be
+ // a well-behaved function and not block.
+ when int64
+ period int64
+ f func(interface{}, uintptr)
+ arg interface{}
+ seq uintptr
+}
+
+var timers struct {
+ lock mutex
+ gp *g
+ created bool
+ sleeping bool
+ rescheduling bool
+ waitnote note
+ t []*timer
+}
+
+// nacl fake time support - time in nanoseconds since 1970
+var faketime int64
+
+// Package time APIs.
+// Godoc uses the comments in package time, not these.
+
+// time.now is implemented in assembly.
+
+// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
+func timeSleep(ns int64) {
+ if ns <= 0 {
+ return
+ }
+
+ t := new(timer)
+ t.when = nanotime() + ns
+ t.f = goroutineReady
+ t.arg = getg()
+ lock(&timers.lock)
+ addtimerLocked(t)
+ goparkunlock(&timers.lock, "sleep")
+}
+
+// startTimer adds t to the timer heap.
+func startTimer(t *timer) {
+ if raceenabled {
+ racerelease(unsafe.Pointer(t))
+ }
+ addtimer(t)
+}
+
+// stopTimer removes t from the timer heap if it is there.
+// It returns true if t was removed, false if t wasn't even there.
+func stopTimer(t *timer) bool {
+ return deltimer(t)
+}
+
+// Go runtime.
+
+// Ready the goroutine arg.
+func goroutineReady(arg interface{}, seq uintptr) {
+ goready(arg.(*g))
+}
+
+func addtimer(t *timer) {
+ lock(&timers.lock)
+ addtimerLocked(t)
+ unlock(&timers.lock)
+}
+
+// Add a timer to the heap and start or kick the timer proc
+// if the new timer is earlier than any of the others.
+// Timers are locked.
+func addtimerLocked(t *timer) {
+ // when must never be negative; otherwise timerproc will overflow
+ // during its delta calculation and never expire other runtime·timers.
+ if t.when < 0 {
+ t.when = 1<<63 - 1
+ }
+ t.i = len(timers.t)
+ timers.t = append(timers.t, t)
+ siftupTimer(t.i)
+ if t.i == 0 {
+ // siftup moved to top: new earliest deadline.
+ if timers.sleeping {
+ timers.sleeping = false
+ notewakeup(&timers.waitnote)
+ }
+ if timers.rescheduling {
+ timers.rescheduling = false
+ goready(timers.gp)
+ }
+ }
+ if !timers.created {
+ timers.created = true
+ go timerproc()
+ }
+}
+
+// Delete timer t from the heap.
+// Do not need to update the timerproc: if it wakes up early, no big deal.
+func deltimer(t *timer) bool {
+ // Dereference t so that any panic happens before the lock is held.
+ // Discard result, because t might be moving in the heap.
+ _ = t.i
+
+ lock(&timers.lock)
+ // t may not be registered anymore and may have
+ // a bogus i (typically 0, if generated by Go).
+ // Verify it before proceeding.
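+ //
+ // (What follows is standard d-ary heap deletion on the 4-ary heap
+ // used in this file: swap the victim with the last timer, shrink the
+ // slice, then sift the swapped-in timer both up and down to restore
+ // heap order.)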
+ i := t.i + last := len(timers.t) - 1 + if i < 0 || i > last || timers.t[i] != t { + unlock(&timers.lock) + return false + } + if i != last { + timers.t[i] = timers.t[last] + timers.t[i].i = i + } + timers.t[last] = nil + timers.t = timers.t[:last] + if i != last { + siftupTimer(i) + siftdownTimer(i) + } + unlock(&timers.lock) + return true +} + +// Timerproc runs the time-driven events. +// It sleeps until the next event in the timers heap. +// If addtimer inserts a new earlier event, addtimer1 wakes timerproc early. +func timerproc() { + timers.gp = getg() + timers.gp.issystem = true + for { + lock(&timers.lock) + timers.sleeping = false + now := nanotime() + delta := int64(-1) + for { + if len(timers.t) == 0 { + delta = -1 + break + } + t := timers.t[0] + delta = t.when - now + if delta > 0 { + break + } + if t.period > 0 { + // leave in heap but adjust next time to fire + t.when += t.period * (1 + -delta/t.period) + siftdownTimer(0) + } else { + // remove from heap + last := len(timers.t) - 1 + if last > 0 { + timers.t[0] = timers.t[last] + timers.t[0].i = 0 + } + timers.t[last] = nil + timers.t = timers.t[:last] + if last > 0 { + siftdownTimer(0) + } + t.i = -1 // mark as removed + } + f := t.f + arg := t.arg + seq := t.seq + unlock(&timers.lock) + if raceenabled { + raceacquire(unsafe.Pointer(t)) + } + f(arg, seq) + lock(&timers.lock) + } + if delta < 0 || faketime > 0 { + // No timers left - put goroutine to sleep. + timers.rescheduling = true + goparkunlock(&timers.lock, "timer goroutine (idle)") + continue + } + // At least one timer pending. Sleep until then. + timers.sleeping = true + noteclear(&timers.waitnote) + unlock(&timers.lock) + notetsleepg(&timers.waitnote, delta) + } +} + +func timejump() *g { + if faketime == 0 { + return nil + } + + lock(&timers.lock) + if !timers.created || len(timers.t) == 0 { + unlock(&timers.lock) + return nil + } + + var gp *g + if faketime < timers.t[0].when { + faketime = timers.t[0].when + if timers.rescheduling { + timers.rescheduling = false + gp = timers.gp + } + } + unlock(&timers.lock) + return gp +} + +// Heap maintenance algorithms. + +func siftupTimer(i int) { + t := timers.t + when := t[i].when + tmp := t[i] + for i > 0 { + p := (i - 1) / 4 // parent + if when >= t[p].when { + break + } + t[i] = t[p] + t[i].i = i + t[p] = tmp + t[p].i = p + i = p + } +} + +func siftdownTimer(i int) { + t := timers.t + n := len(t) + when := t[i].when + tmp := t[i] + for { + c := i*4 + 1 // left child + c3 := c + 2 // mid child + if c >= n { + break + } + w := t[c].when + if c+1 < n && t[c+1].when < w { + w = t[c+1].when + c++ + } + if c3 < n { + w3 := t[c3].when + if c3+1 < n && t[c3+1].when < w3 { + w3 = t[c3+1].when + c3++ + } + if w3 < w { + w = w3 + c = c3 + } + } + if w >= when { + break + } + t[i] = t[c] + t[i].i = i + t[c] = tmp + t[c].i = c + i = c + } +} diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go deleted file mode 100644 index a5ed8af..0000000 --- a/libgo/go/runtime/type.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* - * Runtime type representation. - * This file exists only to provide types that 6l can turn into - * DWARF information for use by gdb. Nothing else uses these. - * They should match the same types in ../reflect/type.go. - * For comments see ../reflect/type.go. 
- */ - -package runtime - -import "unsafe" - -type rtype struct { - kind uint8 - align uint8 - fieldAlign uint8 - size uintptr - hash uint32 - - hashfn func(unsafe.Pointer, uintptr) uintptr - equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool - - gc unsafe.Pointer - string *string - *uncommonType - ptrToThis *rtype - zero unsafe.Pointer -} - -type _method struct { - name *string - pkgPath *string - mtyp *rtype - typ *rtype - tfn unsafe.Pointer -} - -type uncommonType struct { - name *string - pkgPath *string - methods []_method -} - -type _imethod struct { - name *string - pkgPath *string - typ *rtype -} - -type interfaceType struct { - rtype - methods []_imethod -} diff --git a/libgo/go/runtime/typekind.go b/libgo/go/runtime/typekind.go new file mode 100644 index 0000000..b64ec44 --- /dev/null +++ b/libgo/go/runtime/typekind.go @@ -0,0 +1,44 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +const ( + kindBool = _KindBool + kindInt = _KindInt + kindInt8 = _KindInt8 + kindInt16 = _KindInt16 + kindInt32 = _KindInt32 + kindInt64 = _KindInt64 + kindUint = _KindUint + kindUint8 = _KindUint8 + kindUint16 = _KindUint16 + kindUint32 = _KindUint32 + kindUint64 = _KindUint64 + kindUintptr = _KindUintptr + kindFloat32 = _KindFloat32 + kindFloat64 = _KindFloat64 + kindComplex64 = _KindComplex64 + kindComplex128 = _KindComplex128 + kindArray = _KindArray + kindChan = _KindChan + kindFunc = _KindFunc + kindInterface = _KindInterface + kindMap = _KindMap + kindPtr = _KindPtr + kindSlice = _KindSlice + kindString = _KindString + kindStruct = _KindStruct + kindUnsafePointer = _KindUnsafePointer + + kindDirectIface = _KindDirectIface + kindGCProg = _KindGCProg + kindNoPointers = _KindNoPointers + kindMask = _KindMask +) + +// isDirectIface reports whether t is stored directly in an interface value. +func isDirectIface(t *_type) bool { + return t.kind&kindDirectIface != 0 +} diff --git a/libgo/go/runtime/vlrt.go b/libgo/go/runtime/vlrt.go new file mode 100644 index 0000000..6370732 --- /dev/null +++ b/libgo/go/runtime/vlrt.go @@ -0,0 +1,258 @@ +// Inferno's libkern/vlrt-arm.c +// http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-arm.c +// +// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. +// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved. +// Portions Copyright 2009 The Go Authors. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// +build arm 386 + +package runtime + +import "unsafe" + +const ( + sign32 = 1 << (32 - 1) + sign64 = 1 << (64 - 1) +) + +func float64toint64(d float64) (y uint64) { + _d2v(&y, d) + return +} + +func float64touint64(d float64) (y uint64) { + _d2v(&y, d) + return +} + +func int64tofloat64(y int64) float64 { + if y < 0 { + return -uint64tofloat64(-uint64(y)) + } + return uint64tofloat64(uint64(y)) +} + +func uint64tofloat64(y uint64) float64 { + hi := float64(uint32(y >> 32)) + lo := float64(uint32(y)) + d := hi*(1<<32) + lo + return d +} + +func _d2v(y *uint64, d float64) { + x := *(*uint64)(unsafe.Pointer(&d)) + + xhi := uint32(x>>32)&0xfffff | 0x100000 + xlo := uint32(x) + sh := 1075 - int32(uint32(x>>52)&0x7ff) + + var ylo, yhi uint32 + if sh >= 0 { + sh := uint32(sh) + /* v = (hi||lo) >> sh */ + if sh < 32 { + if sh == 0 { + ylo = xlo + yhi = xhi + } else { + ylo = xlo>>sh | xhi<<(32-sh) + yhi = xhi >> sh + } + } else { + if sh == 32 { + ylo = xhi + } else if sh < 64 { + ylo = xhi >> (sh - 32) + } + } + } else { + /* v = (hi||lo) << -sh */ + sh := uint32(-sh) + if sh <= 11 { + ylo = xlo << sh + yhi = xhi<<sh | xlo>>(32-sh) + } else { + /* overflow */ + yhi = uint32(d) /* causes something awful */ + } + } + if x&sign64 != 0 { + if ylo != 0 { + ylo = -ylo + yhi = ^yhi + } else { + yhi = -yhi + } + } + + *y = uint64(yhi)<<32 | uint64(ylo) +} + +func uint64div(n, d uint64) uint64 { + // Check for 32 bit operands + if uint32(n>>32) == 0 && uint32(d>>32) == 0 { + if uint32(d) == 0 { + panicdivide() + } + return uint64(uint32(n) / uint32(d)) + } + q, _ := dodiv(n, d) + return q +} + +func uint64mod(n, d uint64) uint64 { + // Check for 32 bit operands + if uint32(n>>32) == 0 && uint32(d>>32) == 0 { + if uint32(d) == 0 { + panicdivide() + } + return uint64(uint32(n) % uint32(d)) + } + _, r := dodiv(n, d) + return r +} + +func int64div(n, d int64) int64 { + // Check for 32 bit operands + if int64(int32(n)) == n && int64(int32(d)) == d { + if int32(n) == -0x80000000 && int32(d) == -1 { + // special case: 32-bit -0x80000000 / -1 = -0x80000000, + // but 64-bit -0x80000000 / -1 = 0x80000000. + return 0x80000000 + } + if int32(d) == 0 { + panicdivide() + } + return int64(int32(n) / int32(d)) + } + + nneg := n < 0 + dneg := d < 0 + if nneg { + n = -n + } + if dneg { + d = -d + } + uq, _ := dodiv(uint64(n), uint64(d)) + q := int64(uq) + if nneg != dneg { + q = -q + } + return q +} + +func int64mod(n, d int64) int64 { + // Check for 32 bit operands + if int64(int32(n)) == n && int64(int32(d)) == d { + if int32(d) == 0 { + panicdivide() + } + return int64(int32(n) % int32(d)) + } + + nneg := n < 0 + if nneg { + n = -n + } + if d < 0 { + d = -d + } + _, ur := dodiv(uint64(n), uint64(d)) + r := int64(ur) + if nneg { + r = -r + } + return r +} + +//go:noescape +func _mul64by32(lo64 *uint64, a uint64, b uint32) (hi32 uint32) + +//go:noescape +func _div64by32(a uint64, b uint32, r *uint32) (q uint32) + +func dodiv(n, d uint64) (q, r uint64) { + if GOARCH == "arm" { + // arm doesn't have a division instruction, so + // slowdodiv is the best that we can do. + // TODO: revisit for arm64. 
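+ // (slowdodiv, defined below, is textbook binary long division:
+ // shift d up until it nearly reaches n, then produce one quotient
+ // bit per iteration by compare, subtract, and shift.)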
+ return slowdodiv(n, d) + } + + if d > n { + return 0, n + } + + if uint32(d>>32) != 0 { + t := uint32(n>>32) / uint32(d>>32) + var lo64 uint64 + hi32 := _mul64by32(&lo64, d, t) + if hi32 != 0 || lo64 > n { + return slowdodiv(n, d) + } + return uint64(t), n - lo64 + } + + // d is 32 bit + var qhi uint32 + if uint32(n>>32) >= uint32(d) { + if uint32(d) == 0 { + panicdivide() + } + qhi = uint32(n>>32) / uint32(d) + n -= uint64(uint32(d)*qhi) << 32 + } else { + qhi = 0 + } + + var rlo uint32 + qlo := _div64by32(n, uint32(d), &rlo) + return uint64(qhi)<<32 + uint64(qlo), uint64(rlo) +} + +func slowdodiv(n, d uint64) (q, r uint64) { + if d == 0 { + panicdivide() + } + + // Set up the divisor and find the number of iterations needed. + capn := n + if n >= sign64 { + capn = sign64 + } + i := 0 + for d < capn { + d <<= 1 + i++ + } + + for ; i >= 0; i-- { + q <<= 1 + if n >= d { + n -= d + q |= 1 + } + d >>= 1 + } + return q, n +} |