author     Ian Lance Taylor <ian@gcc.gnu.org>  2015-10-31 00:59:47 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>  2015-10-31 00:59:47 +0000
commit     af146490bb04205107cb23e301ec7a8ff927b5fc (patch)
tree       13beeaed3698c61903fe93fb1ce70bd9b18d4e7f /libgo/go/runtime
parent     725e1be3406315d9bcc8195d7eef0a7082b3c7cc (diff)
runtime: Remove now unnecessary pad field from ParFor.
It is not needed due to the removal of the ctx field.

Reviewed-on: https://go-review.googlesource.com/16525
From-SVN: r229616
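Aside (not part of the patch): before dropping an explicit pad field like this, the struct layout is usually checked with the unsafe package. The sketch below uses invented field names — it is not the real ParFor definition — just to show the kind of check involved.

package main

import (
	"fmt"
	"unsafe"
)

// exampleParFor is a made-up stand-in, not the actual ParFor type; it only
// has a pointer-sized ctx field and a pad field of the sort the commit
// message refers to.
type exampleParFor struct {
	cnt uint32
	ctx unsafe.Pointer
	pad uint32
}

func main() {
	var p exampleParFor
	fmt.Println("size:", unsafe.Sizeof(p))
	fmt.Println("ctx offset:", unsafe.Offsetof(p.ctx))
	fmt.Println("pad offset:", unsafe.Offsetof(p.pad))
}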
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/arch_arm.go  8
-rw-r--r--  libgo/go/runtime/atomic.go  51
-rw-r--r--  libgo/go/runtime/cgocall.go  279
-rw-r--r--  libgo/go/runtime/cgocallback.go  40
-rw-r--r--  libgo/go/runtime/chan.go  655
-rw-r--r--  libgo/go/runtime/chan_test.go  104
-rw-r--r--  libgo/go/runtime/chanbarrier_test.go  83
-rw-r--r--  libgo/go/runtime/compiler.go  2
-rw-r--r--  libgo/go/runtime/complex.go  52
-rw-r--r--  libgo/go/runtime/cpuprof.go  425
-rw-r--r--  libgo/go/runtime/crash_cgo_test.go  237
-rw-r--r--  libgo/go/runtime/crash_test.go  105
-rw-r--r--  libgo/go/runtime/crash_unix_test.go  135
-rw-r--r--  libgo/go/runtime/debug/garbage.go  2
-rw-r--r--  libgo/go/runtime/debug/garbage_test.go  4
-rw-r--r--  libgo/go/runtime/debug/heapdump_test.go  36
-rw-r--r--  libgo/go/runtime/debug/stack.go  2
-rw-r--r--  libgo/go/runtime/env_posix.go  58
-rw-r--r--  libgo/go/runtime/env_test.go  47
-rw-r--r--  libgo/go/runtime/error.go  4
-rw-r--r--  libgo/go/runtime/export_arm_test.go (renamed from libgo/go/runtime/arch_386.go)  7
-rw-r--r--  libgo/go/runtime/export_linux_test.go (renamed from libgo/go/runtime/arch_amd64p32.go)  7
-rw-r--r--  libgo/go/runtime/export_test.go  168
-rw-r--r--  libgo/go/runtime/export_windows_test.go (renamed from libgo/go/runtime/arch_amd64.go)  7
-rw-r--r--  libgo/go/runtime/extern.go  59
-rw-r--r--  libgo/go/runtime/gc_test.go  258
-rw-r--r--  libgo/go/runtime/gcinfo_test.go  138
-rw-r--r--  libgo/go/runtime/hashmap.go  960
-rw-r--r--  libgo/go/runtime/hashmap_fast.go  379
-rw-r--r--  libgo/go/runtime/iface_test.go  125
-rw-r--r--  libgo/go/runtime/lfstack_test.go  8
-rw-r--r--  libgo/go/runtime/lock_futex.go  205
-rw-r--r--  libgo/go/runtime/lock_sema.go  270
-rw-r--r--  libgo/go/runtime/malloc.go  837
-rw-r--r--  libgo/go/runtime/malloc_test.go  63
-rw-r--r--  libgo/go/runtime/map_test.go  68
-rw-r--r--  libgo/go/runtime/mapspeed_test.go  28
-rw-r--r--  libgo/go/runtime/mem.go  16
-rw-r--r--  libgo/go/runtime/memmove_test.go  295
-rw-r--r--  libgo/go/runtime/mfinal_test.go  3
-rw-r--r--  libgo/go/runtime/mgc0.go  152
-rw-r--r--  libgo/go/runtime/mprof.go  668
-rw-r--r--  libgo/go/runtime/netpoll.go  455
-rw-r--r--  libgo/go/runtime/netpoll_epoll.go  97
-rw-r--r--  libgo/go/runtime/netpoll_kqueue.go  101
-rw-r--r--  libgo/go/runtime/netpoll_nacl.go  26
-rw-r--r--  libgo/go/runtime/noasm_arm.go  54
-rw-r--r--  libgo/go/runtime/norace_test.go  4
-rw-r--r--  libgo/go/runtime/os_darwin.go  24
-rw-r--r--  libgo/go/runtime/os_dragonfly.go  20
-rw-r--r--  libgo/go/runtime/os_freebsd.go  17
-rw-r--r--  libgo/go/runtime/os_linux.go  17
-rw-r--r--  libgo/go/runtime/os_nacl.go  39
-rw-r--r--  libgo/go/runtime/os_netbsd.go  20
-rw-r--r--  libgo/go/runtime/os_openbsd.go  17
-rw-r--r--  libgo/go/runtime/os_plan9.go  105
-rw-r--r--  libgo/go/runtime/os_solaris.go  100
-rw-r--r--  libgo/go/runtime/os_windows.go  58
-rw-r--r--  libgo/go/runtime/os_windows_386.go  11
-rw-r--r--  libgo/go/runtime/os_windows_amd64.go  11
-rw-r--r--  libgo/go/runtime/panic.go  505
-rw-r--r--  libgo/go/runtime/parfor_test.go  21
-rw-r--r--  libgo/go/runtime/pprof/pprof.go  65
-rw-r--r--  libgo/go/runtime/pprof/pprof_test.go  54
-rw-r--r--  libgo/go/runtime/print1.go  323
-rw-r--r--  libgo/go/runtime/proc.go  246
-rw-r--r--  libgo/go/runtime/proc_test.go  137
-rw-r--r--  libgo/go/runtime/race0.go  37
-rw-r--r--  libgo/go/runtime/rdebug.go  37
-rw-r--r--  libgo/go/runtime/rune.go  219
-rw-r--r--  libgo/go/runtime/runtime.go  60
-rw-r--r--  libgo/go/runtime/runtime_test.go  165
-rw-r--r--  libgo/go/runtime/runtime_unix_test.go  2
-rw-r--r--  libgo/go/runtime/select.go  651
-rw-r--r--  libgo/go/runtime/sema.go  275
-rw-r--r--  libgo/go/runtime/signal_unix.go  13
-rw-r--r--  libgo/go/runtime/sigpanic_unix.go  40
-rw-r--r--  libgo/go/runtime/sigqueue.go  182
-rw-r--r--  libgo/go/runtime/slice.go  139
-rw-r--r--  libgo/go/runtime/softfloat64.go  498
-rw-r--r--  libgo/go/runtime/softfloat64_test.go  198
-rw-r--r--  libgo/go/runtime/stack.go  13
-rw-r--r--  libgo/go/runtime/string.go  298
-rw-r--r--  libgo/go/runtime/string_test.go  163
-rw-r--r--  libgo/go/runtime/stubs.go  316
-rw-r--r--  libgo/go/runtime/symtab_test.go  113
-rw-r--r--  libgo/go/runtime/syscall_windows.go  174
-rw-r--r--  libgo/go/runtime/time.go  289
-rw-r--r--  libgo/go/runtime/typekind.go  44
-rw-r--r--  libgo/go/runtime/vlop_arm_test.go  70
90 files changed, 2100 insertions, 11473 deletions
diff --git a/libgo/go/runtime/arch_arm.go b/libgo/go/runtime/arch_arm.go
deleted file mode 100644
index 79d38c7..0000000
--- a/libgo/go/runtime/arch_arm.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-type uintreg uint32
-type intptr int32 // TODO(rsc): remove
diff --git a/libgo/go/runtime/atomic.go b/libgo/go/runtime/atomic.go
deleted file mode 100644
index 7e9d9b3..0000000
--- a/libgo/go/runtime/atomic.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !arm
-
-package runtime
-
-import "unsafe"
-
-//go:noescape
-func xadd(ptr *uint32, delta int32) uint32
-
-//go:noescape
-func xadd64(ptr *uint64, delta int64) uint64
-
-//go:noescape
-func xchg(ptr *uint32, new uint32) uint32
-
-//go:noescape
-func xchg64(ptr *uint64, new uint64) uint64
-
-//go:noescape
-func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
-
-//go:noescape
-func xchguintptr(ptr *uintptr, new uintptr) uintptr
-
-//go:noescape
-func atomicload(ptr *uint32) uint32
-
-//go:noescape
-func atomicload64(ptr *uint64) uint64
-
-//go:noescape
-func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
-
-//go:noescape
-func atomicor8(ptr *uint8, val uint8)
-
-//go:noescape
-func cas64(ptr *uint64, old, new uint64) bool
-
-//go:noescape
-func atomicstore(ptr *uint32, val uint32)
-
-//go:noescape
-func atomicstore64(ptr *uint64, val uint64)
-
-//go:noescape
-func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer)
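Aside (not part of the patch): the declarations removed above are the runtime's private atomic primitives. Outside the runtime the same operations are reached through the standard sync/atomic package; a rough user-level mapping, for orientation only:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n uint32
	atomic.AddUint32(&n, 3)            // user-level analogue of xadd
	atomic.StoreUint32(&n, 5)          // analogue of atomicstore
	fmt.Println(atomic.LoadUint32(&n)) // analogue of atomicload

	var m uint64
	fmt.Println(atomic.CompareAndSwapUint64(&m, 0, 7)) // analogue of cas64
	fmt.Println(atomic.AddUint64(&m, 1))               // analogue of xadd64
}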
diff --git a/libgo/go/runtime/cgocall.go b/libgo/go/runtime/cgocall.go
deleted file mode 100644
index 7fd9146..0000000
--- a/libgo/go/runtime/cgocall.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Cgo call and callback support.
-//
-// To call into the C function f from Go, the cgo-generated code calls
-// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
-// gcc-compiled function written by cgo.
-//
-// runtime.cgocall (below) locks g to m, calls entersyscall
-// so as not to block other goroutines or the garbage collector,
-// and then calls runtime.asmcgocall(_cgo_Cfunc_f, frame).
-//
-// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
-// (assumed to be an operating system-allocated stack, so safe to run
-// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
-//
-// _cgo_Cfunc_f invokes the actual C function f with arguments
-// taken from the frame structure, records the results in the frame,
-// and returns to runtime.asmcgocall.
-//
-// After it regains control, runtime.asmcgocall switches back to the
-// original g (m->curg)'s stack and returns to runtime.cgocall.
-//
-// After it regains control, runtime.cgocall calls exitsyscall, which blocks
-// until this m can run Go code without violating the $GOMAXPROCS limit,
-// and then unlocks g from m.
-//
-// The above description skipped over the possibility of the gcc-compiled
-// function f calling back into Go. If that happens, we continue down
-// the rabbit hole during the execution of f.
-//
-// To make it possible for gcc-compiled C code to call a Go function p.GoF,
-// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
-// know about packages). The gcc-compiled C function f calls GoF.
-//
-// GoF calls crosscall2(_cgoexp_GoF, frame, framesize). Crosscall2
-// (in cgo/gcc_$GOARCH.S, a gcc-compiled assembly file) is a two-argument
-// adapter from the gcc function call ABI to the 6c function call ABI.
-// It is called from gcc to call 6c functions. In this case it calls
-// _cgoexp_GoF(frame, framesize), still running on m->g0's stack
-// and outside the $GOMAXPROCS limit. Thus, this code cannot yet
-// call arbitrary Go code directly and must be careful not to allocate
-// memory or use up m->g0's stack.
-//
-// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize).
-// (The reason for having _cgoexp_GoF instead of writing a crosscall3
-// to make this call directly is that _cgoexp_GoF, because it is compiled
-// with 6c instead of gcc, can refer to dotted names like
-// runtime.cgocallback and p.GoF.)
-//
-// runtime.cgocallback (in asm_$GOARCH.s) switches from m->g0's
-// stack to the original g (m->curg)'s stack, on which it calls
-// runtime.cgocallbackg(p.GoF, frame, framesize).
-// As part of the stack switch, runtime.cgocallback saves the current
-// SP as m->g0->sched.sp, so that any use of m->g0's stack during the
-// execution of the callback will be done below the existing stack frames.
-// Before overwriting m->g0->sched.sp, it pushes the old value on the
-// m->g0 stack, so that it can be restored later.
-//
-// runtime.cgocallbackg (below) is now running on a real goroutine
-// stack (not an m->g0 stack). First it calls runtime.exitsyscall, which will
-// block until the $GOMAXPROCS limit allows running this goroutine.
-// Once exitsyscall has returned, it is safe to do things like call the memory
-// allocator or invoke the Go callback function p.GoF. runtime.cgocallbackg
-// first defers a function to unwind m->g0.sched.sp, so that if p.GoF
-// panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack
-// and the m->curg stack will be unwound in lock step.
-// Then it calls p.GoF. Finally it pops but does not execute the deferred
-// function, calls runtime.entersyscall, and returns to runtime.cgocallback.
-//
-// After it regains control, runtime.cgocallback switches back to
-// m->g0's stack (the pointer is still in m->g0.sched.sp), restores the old
-// m->g0.sched.sp value from the stack, and returns to _cgoexp_GoF.
-//
-// _cgoexp_GoF immediately returns to crosscall2, which restores the
-// callee-save registers for gcc and returns to GoF, which returns to f.
-
-package runtime
-
-import "unsafe"
-
-// Call from Go to C.
-//go:nosplit
-func cgocall(fn, arg unsafe.Pointer) {
- cgocall_errno(fn, arg)
-}
-
-//go:nosplit
-func cgocall_errno(fn, arg unsafe.Pointer) int32 {
- if !iscgo && GOOS != "solaris" && GOOS != "windows" {
- gothrow("cgocall unavailable")
- }
-
- if fn == nil {
- gothrow("cgocall nil")
- }
-
- if raceenabled {
- racereleasemerge(unsafe.Pointer(&racecgosync))
- }
-
- // Create an extra M for callbacks on threads not created by Go on first cgo call.
- if needextram == 1 && cas(&needextram, 1, 0) {
- onM(newextram)
- }
-
- /*
- * Lock g to m to ensure we stay on the same stack if we do a
- * cgo callback. Add entry to defer stack in case of panic.
- */
- lockOSThread()
- mp := getg().m
- mp.ncgocall++
- mp.ncgo++
- defer endcgo(mp)
-
- /*
- * Announce we are entering a system call
- * so that the scheduler knows to create another
- * M to run goroutines while we are in the
- * foreign code.
- *
- * The call to asmcgocall is guaranteed not to
- * split the stack and does not allocate memory,
- * so it is safe to call while "in a system call", outside
- * the $GOMAXPROCS accounting.
- */
- entersyscall()
- errno := asmcgocall_errno(fn, arg)
- exitsyscall()
-
- return errno
-}
-
-//go:nosplit
-func endcgo(mp *m) {
- mp.ncgo--
- if mp.ncgo == 0 {
- // We are going back to Go and are not in a recursive
- // call. Let the GC collect any memory allocated via
- // _cgo_allocate that is no longer referenced.
- mp.cgomal = nil
- }
-
- if raceenabled {
- raceacquire(unsafe.Pointer(&racecgosync))
- }
-
- unlockOSThread() // invalidates mp
-}
-
-// Helper functions for cgo code.
-
-// Filled by schedinit from corresponding C variables,
-// which are in turn filled in by dynamic linker when Cgo is available.
-var cgoMalloc, cgoFree unsafe.Pointer
-
-func cmalloc(n uintptr) unsafe.Pointer {
- var args struct {
- n uint64
- ret unsafe.Pointer
- }
- args.n = uint64(n)
- cgocall(cgoMalloc, unsafe.Pointer(&args))
- if args.ret == nil {
- gothrow("C malloc failed")
- }
- return args.ret
-}
-
-func cfree(p unsafe.Pointer) {
- cgocall(cgoFree, p)
-}
-
-// Call from C back to Go.
-//go:nosplit
-func cgocallbackg() {
- gp := getg()
- if gp != gp.m.curg {
- println("runtime: bad g in cgocallback")
- exit(2)
- }
-
- // entersyscall saves the caller's SP to allow the GC to trace the Go
- // stack. However, since we're returning to an earlier stack frame and
- // need to pair with the entersyscall() call made by cgocall, we must
- // save syscall* and let reentersyscall restore them.
- savedsp := unsafe.Pointer(gp.syscallsp)
- savedpc := gp.syscallpc
- exitsyscall() // coming out of cgo call
- cgocallbackg1()
- // going back to cgo call
- reentersyscall(savedpc, savedsp)
-}
-
-func cgocallbackg1() {
- gp := getg()
- if gp.m.needextram {
- gp.m.needextram = false
- onM(newextram)
- }
-
- // Add entry to defer stack in case of panic.
- restore := true
- defer unwindm(&restore)
-
- if raceenabled {
- raceacquire(unsafe.Pointer(&racecgosync))
- }
-
- type args struct {
- fn *funcval
- arg unsafe.Pointer
- argsize uintptr
- }
- var cb *args
-
- // Location of callback arguments depends on stack frame layout
- // and size of stack frame of cgocallback_gofunc.
- sp := gp.m.g0.sched.sp
- switch GOARCH {
- default:
- gothrow("cgocallbackg is unimplemented on arch")
- case "arm":
- // On arm, stack frame is two words and there's a saved LR between
- // SP and the stack frame and between the stack frame and the arguments.
- cb = (*args)(unsafe.Pointer(sp + 4*ptrSize))
- case "amd64":
- // On amd64, stack frame is one word, plus caller PC.
- cb = (*args)(unsafe.Pointer(sp + 2*ptrSize))
- case "386":
- // On 386, stack frame is three words, plus caller PC.
- cb = (*args)(unsafe.Pointer(sp + 4*ptrSize))
- }
-
- // Invoke callback.
- reflectcall(unsafe.Pointer(cb.fn), unsafe.Pointer(cb.arg), uint32(cb.argsize), 0)
-
- if raceenabled {
- racereleasemerge(unsafe.Pointer(&racecgosync))
- }
-
- // Do not unwind m->g0->sched.sp.
- // Our caller, cgocallback, will do that.
- restore = false
-}
-
-func unwindm(restore *bool) {
- if !*restore {
- return
- }
- // Restore sp saved by cgocallback during
- // unwind of g's stack (see comment at top of file).
- mp := acquirem()
- sched := &mp.g0.sched
- switch GOARCH {
- default:
- gothrow("unwindm not implemented")
- case "386", "amd64":
- sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp))
- case "arm":
- sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 4))
- }
- releasem(mp)
-}
-
-// called from assembly
-func badcgocallback() {
- gothrow("misaligned stack in cgocallback")
-}
-
-// called from (incomplete) assembly
-func cgounimpl() {
- gothrow("cgo not implemented")
-}
-
-var racecgosync uint64 // represents possible synchronization in C code
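Aside (not part of the patch): the long comment at the top of the deleted cgocall.go describes the round trip runtime.cgocall/asmcgocall take for a Go-to-C call, and runtime.cgocallback/cgocallbackg for a C-to-Go callback. A minimal two-file cgo program that exercises both directions looks roughly like the sketch below; it illustrates the mechanism the comment documents and is not code from this patch.

// main.go
package main

/*
extern void goCallback(int x); // provided by the //export in callback.go

static int callC(int x) {
	goCallback(x + 1); // C calling back into Go: the cgocallback path
	return x * 2;
}
*/
import "C"

import "fmt"

func main() {
	r := C.callC(20) // Go calling C: the runtime.cgocall path
	fmt.Println("C returned", int(r))
}

// callback.go (separate file, because a file that uses //export must not
// define C functions in its preamble)
package main

import "C"
import "fmt"

//export goCallback
func goCallback(x C.int) {
	fmt.Println("callback from C with", int(x))
}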
diff --git a/libgo/go/runtime/cgocallback.go b/libgo/go/runtime/cgocallback.go
deleted file mode 100644
index 2c89143..0000000
--- a/libgo/go/runtime/cgocallback.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-// These functions are called from C code via cgo/callbacks.c.
-
-// Allocate memory. This allocates the requested number of bytes in
-// memory controlled by the Go runtime. The allocated memory will be
-// zeroed. You are responsible for ensuring that the Go garbage
-// collector can see a pointer to the allocated memory for as long as
-// it is valid, e.g., by storing a pointer in a local variable in your
-// C function, or in memory allocated by the Go runtime. If the only
-// pointers are in a C global variable or in memory allocated via
-// malloc, then the Go garbage collector may collect the memory.
-//
-// TODO(rsc,iant): This memory is untyped.
-// Either we need to add types or we need to stop using it.
-
-func _cgo_allocate_internal(len uintptr) unsafe.Pointer {
- if len == 0 {
- len = 1
- }
- ret := unsafe.Pointer(&make([]unsafe.Pointer, (len+ptrSize-1)/ptrSize)[0])
- c := new(cgomal)
- c.alloc = ret
- gp := getg()
- c.next = gp.m.cgomal
- gp.m.cgomal = c
- return ret
-}
-
-// Panic.
-
-func _cgo_panic_internal(p *byte) {
- panic(gostringnocopy(p))
-}
diff --git a/libgo/go/runtime/chan.go b/libgo/go/runtime/chan.go
deleted file mode 100644
index 0eb87df..0000000
--- a/libgo/go/runtime/chan.go
+++ /dev/null
@@ -1,655 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// This file contains the implementation of Go channels.
-
-import "unsafe"
-
-const (
- maxAlign = 8
- hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
- debugChan = false
-)
-
-// TODO(khr): make hchan.buf an unsafe.Pointer, not a *uint8
-
-func makechan(t *chantype, size int64) *hchan {
- elem := t.elem
-
- // compiler checks this but be safe.
- if elem.size >= 1<<16 {
- gothrow("makechan: invalid channel element type")
- }
- if hchanSize%maxAlign != 0 || elem.align > maxAlign {
- gothrow("makechan: bad alignment")
- }
- if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (maxmem-hchanSize)/uintptr(elem.size)) {
- panic("makechan: size out of range")
- }
-
- var c *hchan
- if elem.kind&kindNoPointers != 0 || size == 0 {
- // Allocate memory in one call.
- // Hchan does not contain pointers interesting for GC in this case:
- // buf points into the same allocation, elemtype is persistent.
- // SudoG's are referenced from their owning thread so they can't be collected.
- // TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
- c = (*hchan)(mallocgc(hchanSize+uintptr(size)*uintptr(elem.size), nil, flagNoScan))
- if size > 0 && elem.size != 0 {
- c.buf = (*uint8)(add(unsafe.Pointer(c), hchanSize))
- } else {
- c.buf = (*uint8)(unsafe.Pointer(c)) // race detector uses this location for synchronization
- }
- } else {
- c = new(hchan)
- c.buf = (*uint8)(newarray(elem, uintptr(size)))
- }
- c.elemsize = uint16(elem.size)
- c.elemtype = elem
- c.dataqsiz = uint(size)
-
- if debugChan {
- print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n")
- }
- return c
-}
-
-// chanbuf(c, i) is pointer to the i'th slot in the buffer.
-func chanbuf(c *hchan, i uint) unsafe.Pointer {
- return add(unsafe.Pointer(c.buf), uintptr(i)*uintptr(c.elemsize))
-}
-
-// entry point for c <- x from compiled code
-//go:nosplit
-func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) {
- chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t)))
-}
-
-/*
- * generic single channel send/recv
- * If block is not nil,
- * then the protocol will not
- * sleep but return if it could
- * not complete.
- *
- * sleep can wake up with g.param == nil
- * when a channel involved in the sleep has
- * been closed. it is easiest to loop and re-run
- * the operation; we'll see that it's now closed.
- */
-func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
- if raceenabled {
- raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend))
- }
-
- if c == nil {
- if !block {
- return false
- }
- gopark(nil, nil, "chan send (nil chan)")
- gothrow("unreachable")
- }
-
- if debugChan {
- print("chansend: chan=", c, "\n")
- }
-
- if raceenabled {
- racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
- }
-
- // Fast path: check for failed non-blocking operation without acquiring the lock.
- //
- // After observing that the channel is not closed, we observe that the channel is
- // not ready for sending. Each of these observations is a single word-sized read
- // (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
- // Because a closed channel cannot transition from 'ready for sending' to
- // 'not ready for sending', even if the channel is closed between the two observations,
- // they imply a moment between the two when the channel was both not yet closed
- // and not ready for sending. We behave as if we observed the channel at that moment,
- // and report that the send cannot proceed.
- //
- // It is okay if the reads are reordered here: if we observe that the channel is not
- // ready for sending and then observe that it is not closed, that implies that the
- // channel wasn't closed during the first observation.
- if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
- (c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
- return false
- }
-
- var t0 int64
- if blockprofilerate > 0 {
- t0 = cputicks()
- }
-
- lock(&c.lock)
- if c.closed != 0 {
- unlock(&c.lock)
- panic("send on closed channel")
- }
-
- if c.dataqsiz == 0 { // synchronous channel
- sg := c.recvq.dequeue()
- if sg != nil { // found a waiting receiver
- if raceenabled {
- racesync(c, sg)
- }
- unlock(&c.lock)
-
- recvg := sg.g
- if sg.elem != nil {
- memmove(unsafe.Pointer(sg.elem), ep, uintptr(c.elemsize))
- sg.elem = nil
- }
- recvg.param = unsafe.Pointer(sg)
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(recvg)
- return true
- }
-
- if !block {
- unlock(&c.lock)
- return false
- }
-
- // no receiver available: block on this channel.
- gp := getg()
- mysg := acquireSudog()
- mysg.releasetime = 0
- if t0 != 0 {
- mysg.releasetime = -1
- }
- mysg.elem = ep
- mysg.waitlink = nil
- gp.waiting = mysg
- mysg.g = gp
- mysg.selectdone = nil
- gp.param = nil
- c.sendq.enqueue(mysg)
- goparkunlock(&c.lock, "chan send")
-
- // someone woke us up.
- if mysg != gp.waiting {
- gothrow("G waiting list is corrupted!")
- }
- gp.waiting = nil
- if gp.param == nil {
- if c.closed == 0 {
- gothrow("chansend: spurious wakeup")
- }
- panic("send on closed channel")
- }
- gp.param = nil
- if mysg.releasetime > 0 {
- blockevent(int64(mysg.releasetime)-t0, 2)
- }
- releaseSudog(mysg)
- return true
- }
-
- // asynchronous channel
- // wait for some space to write our data
- var t1 int64
- for c.qcount >= c.dataqsiz {
- if !block {
- unlock(&c.lock)
- return false
- }
- gp := getg()
- mysg := acquireSudog()
- mysg.releasetime = 0
- if t0 != 0 {
- mysg.releasetime = -1
- }
- mysg.g = gp
- mysg.elem = nil
- mysg.selectdone = nil
- c.sendq.enqueue(mysg)
- goparkunlock(&c.lock, "chan send")
-
- // someone woke us up - try again
- if mysg.releasetime > 0 {
- t1 = mysg.releasetime
- }
- releaseSudog(mysg)
- lock(&c.lock)
- if c.closed != 0 {
- unlock(&c.lock)
- panic("send on closed channel")
- }
- }
-
- // write our data into the channel buffer
- if raceenabled {
- raceacquire(chanbuf(c, c.sendx))
- racerelease(chanbuf(c, c.sendx))
- }
- memmove(chanbuf(c, c.sendx), ep, uintptr(c.elemsize))
- c.sendx++
- if c.sendx == c.dataqsiz {
- c.sendx = 0
- }
- c.qcount++
-
- // wake up a waiting receiver
- sg := c.recvq.dequeue()
- if sg != nil {
- recvg := sg.g
- unlock(&c.lock)
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(recvg)
- } else {
- unlock(&c.lock)
- }
- if t1 > 0 {
- blockevent(t1-t0, 2)
- }
- return true
-}
-
-func closechan(c *hchan) {
- if c == nil {
- panic("close of nil channel")
- }
-
- lock(&c.lock)
- if c.closed != 0 {
- unlock(&c.lock)
- panic("close of closed channel")
- }
-
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&c))
- racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
- racerelease(unsafe.Pointer(c))
- }
-
- c.closed = 1
-
- // release all readers
- for {
- sg := c.recvq.dequeue()
- if sg == nil {
- break
- }
- gp := sg.g
- sg.elem = nil
- gp.param = nil
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp)
- }
-
- // release all writers
- for {
- sg := c.sendq.dequeue()
- if sg == nil {
- break
- }
- gp := sg.g
- sg.elem = nil
- gp.param = nil
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp)
- }
- unlock(&c.lock)
-}
-
-// entry points for <- c from compiled code
-//go:nosplit
-func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) {
- chanrecv(t, c, elem, true)
-}
-
-//go:nosplit
-func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) {
- _, received = chanrecv(t, c, elem, true)
- return
-}
-
-// chanrecv receives on channel c and writes the received data to ep.
-// ep may be nil, in which case received data is ignored.
-// If block == false and no elements are available, returns (false, false).
-// Otherwise, if c is closed, zeros *ep and returns (true, false).
-// Otherwise, fills in *ep with an element and returns (true, true).
-func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
- // raceenabled: don't need to check ep, as it is always on the stack.
-
- if debugChan {
- print("chanrecv: chan=", c, "\n")
- }
-
- if c == nil {
- if !block {
- return
- }
- gopark(nil, nil, "chan receive (nil chan)")
- gothrow("unreachable")
- }
-
- // Fast path: check for failed non-blocking operation without acquiring the lock.
- //
- // After observing that the channel is not ready for receiving, we observe that the
- // channel is not closed. Each of these observations is a single word-sized read
- // (first c.sendq.first or c.qcount, and second c.closed).
- // Because a channel cannot be reopened, the later observation of the channel
- // being not closed implies that it was also not closed at the moment of the
- // first observation. We behave as if we observed the channel at that moment
- // and report that the receive cannot proceed.
- //
- // The order of operations is important here: reversing the operations can lead to
- // incorrect behavior when racing with a close.
- if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
- c.dataqsiz > 0 && atomicloaduint(&c.qcount) == 0) &&
- atomicload(&c.closed) == 0 {
- return
- }
-
- var t0 int64
- if blockprofilerate > 0 {
- t0 = cputicks()
- }
-
- lock(&c.lock)
- if c.dataqsiz == 0 { // synchronous channel
- if c.closed != 0 {
- return recvclosed(c, ep)
- }
-
- sg := c.sendq.dequeue()
- if sg != nil {
- if raceenabled {
- racesync(c, sg)
- }
- unlock(&c.lock)
-
- if ep != nil {
- memmove(ep, sg.elem, uintptr(c.elemsize))
- }
- sg.elem = nil
- gp := sg.g
- gp.param = unsafe.Pointer(sg)
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp)
- selected = true
- received = true
- return
- }
-
- if !block {
- unlock(&c.lock)
- return
- }
-
- // no sender available: block on this channel.
- gp := getg()
- mysg := acquireSudog()
- mysg.releasetime = 0
- if t0 != 0 {
- mysg.releasetime = -1
- }
- mysg.elem = ep
- mysg.waitlink = nil
- gp.waiting = mysg
- mysg.g = gp
- mysg.selectdone = nil
- gp.param = nil
- c.recvq.enqueue(mysg)
- goparkunlock(&c.lock, "chan receive")
-
- // someone woke us up
- if mysg != gp.waiting {
- gothrow("G waiting list is corrupted!")
- }
- gp.waiting = nil
- if mysg.releasetime > 0 {
- blockevent(mysg.releasetime-t0, 2)
- }
- haveData := gp.param != nil
- gp.param = nil
- releaseSudog(mysg)
-
- if haveData {
- // a sender sent us some data. It already wrote to ep.
- selected = true
- received = true
- return
- }
-
- lock(&c.lock)
- if c.closed == 0 {
- gothrow("chanrecv: spurious wakeup")
- }
- return recvclosed(c, ep)
- }
-
- // asynchronous channel
- // wait for some data to appear
- var t1 int64
- for c.qcount <= 0 {
- if c.closed != 0 {
- selected, received = recvclosed(c, ep)
- if t1 > 0 {
- blockevent(t1-t0, 2)
- }
- return
- }
-
- if !block {
- unlock(&c.lock)
- return
- }
-
- // wait for someone to send an element
- gp := getg()
- mysg := acquireSudog()
- mysg.releasetime = 0
- if t0 != 0 {
- mysg.releasetime = -1
- }
- mysg.elem = nil
- mysg.g = gp
- mysg.selectdone = nil
-
- c.recvq.enqueue(mysg)
- goparkunlock(&c.lock, "chan receive")
-
- // someone woke us up - try again
- if mysg.releasetime > 0 {
- t1 = mysg.releasetime
- }
- releaseSudog(mysg)
- lock(&c.lock)
- }
-
- if raceenabled {
- raceacquire(chanbuf(c, c.recvx))
- racerelease(chanbuf(c, c.recvx))
- }
- if ep != nil {
- memmove(ep, chanbuf(c, c.recvx), uintptr(c.elemsize))
- }
- memclr(chanbuf(c, c.recvx), uintptr(c.elemsize))
-
- c.recvx++
- if c.recvx == c.dataqsiz {
- c.recvx = 0
- }
- c.qcount--
-
- // ping a sender now that there is space
- sg := c.sendq.dequeue()
- if sg != nil {
- gp := sg.g
- unlock(&c.lock)
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp)
- } else {
- unlock(&c.lock)
- }
-
- if t1 > 0 {
- blockevent(t1-t0, 2)
- }
- selected = true
- received = true
- return
-}
-
-// recvclosed is a helper function for chanrecv. Handles cleanup
-// when the receiver encounters a closed channel.
-// Caller must hold c.lock, recvclosed will release the lock.
-func recvclosed(c *hchan, ep unsafe.Pointer) (selected, recevied bool) {
- if raceenabled {
- raceacquire(unsafe.Pointer(c))
- }
- unlock(&c.lock)
- if ep != nil {
- memclr(ep, uintptr(c.elemsize))
- }
- return true, false
-}
-
-// compiler implements
-//
-// select {
-// case c <- v:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbsend(c, v) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) {
- return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t)))
-}
-
-// compiler implements
-//
-// select {
-// case v = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if selectnbrecv(&v, c) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) {
- selected, _ = chanrecv(t, c, elem, false)
- return
-}
-
-// compiler implements
-//
-// select {
-// case v, ok = <-c:
-// ... foo
-// default:
-// ... bar
-// }
-//
-// as
-//
-// if c != nil && selectnbrecv2(&v, &ok, c) {
-// ... foo
-// } else {
-// ... bar
-// }
-//
-func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
- // TODO(khr): just return 2 values from this function, now that it is in Go.
- selected, *received = chanrecv(t, c, elem, false)
- return
-}
-
-func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
- return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t)))
-}
-
-func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
- return chanrecv(t, c, elem, !nb)
-}
-
-func reflect_chanlen(c *hchan) int {
- if c == nil {
- return 0
- }
- return int(c.qcount)
-}
-
-func reflect_chancap(c *hchan) int {
- if c == nil {
- return 0
- }
- return int(c.dataqsiz)
-}
-
-func (q *waitq) enqueue(sgp *sudog) {
- sgp.next = nil
- if q.first == nil {
- q.first = sgp
- q.last = sgp
- return
- }
- q.last.next = sgp
- q.last = sgp
-}
-
-func (q *waitq) dequeue() *sudog {
- for {
- sgp := q.first
- if sgp == nil {
- return nil
- }
- q.first = sgp.next
- sgp.next = nil
- if q.last == sgp {
- q.last = nil
- }
-
- // if sgp participates in a select and is already signaled, ignore it
- if sgp.selectdone != nil {
- // claim the right to signal
- if *sgp.selectdone != 0 || !cas(sgp.selectdone, 0, 1) {
- continue
- }
- }
-
- return sgp
- }
-}
-
-func racesync(c *hchan, sg *sudog) {
- racerelease(chanbuf(c, 0))
- raceacquireg(sg.g, chanbuf(c, 0))
- racereleaseg(sg.g, chanbuf(c, 0))
- raceacquire(chanbuf(c, 0))
-}
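Aside (not part of the patch): the comments above selectnbsend, selectnbrecv and selectnbrecv2 show how the compiler lowers a select with a default clause onto those helpers. At the source level that machinery is the ordinary non-blocking channel pattern:

package main

import "fmt"

func main() {
	c := make(chan int, 1)

	// Non-blocking send: lowered to a selectnbsend-style call.
	select {
	case c <- 1:
		fmt.Println("sent")
	default:
		fmt.Println("buffer full, did not block")
	}

	// Non-blocking receive with ok: lowered to a selectnbrecv2-style call.
	select {
	case v, ok := <-c:
		fmt.Println("received", v, ok)
	default:
		fmt.Println("nothing ready")
	}
}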
diff --git a/libgo/go/runtime/chan_test.go b/libgo/go/runtime/chan_test.go
index 4fb305c..6553509 100644
--- a/libgo/go/runtime/chan_test.go
+++ b/libgo/go/runtime/chan_test.go
@@ -223,6 +223,81 @@ func TestNonblockRecvRace(t *testing.T) {
}
}
+// This test checks that select acts on the state of the channels at one
+// moment in the execution, not over a smeared time window.
+// In the test, one goroutine does:
+// create c1, c2
+// make c1 ready for receiving
+// create second goroutine
+// make c2 ready for receiving
+// make c1 no longer ready for receiving (if possible)
+// The second goroutine does a non-blocking select receiving from c1 and c2.
+// From the time the second goroutine is created, at least one of c1 and c2
+// is always ready for receiving, so the select in the second goroutine must
+// always receive from one or the other. It must never execute the default case.
+func TestNonblockSelectRace(t *testing.T) {
+ n := 100000
+ if testing.Short() {
+ n = 1000
+ }
+ done := make(chan bool, 1)
+ for i := 0; i < n; i++ {
+ c1 := make(chan int, 1)
+ c2 := make(chan int, 1)
+ c1 <- 1
+ go func() {
+ select {
+ case <-c1:
+ case <-c2:
+ default:
+ done <- false
+ return
+ }
+ done <- true
+ }()
+ c2 <- 1
+ select {
+ case <-c1:
+ default:
+ }
+ if !<-done {
+ t.Fatal("no chan is ready")
+ }
+ }
+}
+
+// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
+func TestNonblockSelectRace2(t *testing.T) {
+ n := 100000
+ if testing.Short() {
+ n = 1000
+ }
+ done := make(chan bool, 1)
+ for i := 0; i < n; i++ {
+ c1 := make(chan int, 1)
+ c2 := make(chan int)
+ c1 <- 1
+ go func() {
+ select {
+ case <-c1:
+ case <-c2:
+ default:
+ done <- false
+ return
+ }
+ done <- true
+ }()
+ close(c2)
+ select {
+ case <-c1:
+ default:
+ }
+ if !<-done {
+ t.Fatal("no chan is ready")
+ }
+ }
+}
+
func TestSelfSelect(t *testing.T) {
// Ensure that send/recv on the same chan in select
// does not crash nor deadlock.
@@ -458,7 +533,7 @@ func TestMultiConsumer(t *testing.T) {
func TestShrinkStackDuringBlockedSend(t *testing.T) {
// make sure that channel operations still work when we are
// blocked on a channel send and we shrink the stack.
- // NOTE: this test probably won't fail unless stack.c:StackDebug
+ // NOTE: this test probably won't fail unless stack1.go:stackDebug
// is set to >= 1.
const n = 10
c := make(chan int)
@@ -823,3 +898,30 @@ func BenchmarkChanSem(b *testing.B) {
}
})
}
+
+func BenchmarkChanPopular(b *testing.B) {
+ const n = 1000
+ c := make(chan bool)
+ var a []chan bool
+ var wg sync.WaitGroup
+ wg.Add(n)
+ for j := 0; j < n; j++ {
+ d := make(chan bool)
+ a = append(a, d)
+ go func() {
+ for i := 0; i < b.N; i++ {
+ select {
+ case <-c:
+ case <-d:
+ }
+ }
+ wg.Done()
+ }()
+ }
+ for i := 0; i < b.N; i++ {
+ for _, d := range a {
+ d <- true
+ }
+ }
+ wg.Wait()
+}
diff --git a/libgo/go/runtime/chanbarrier_test.go b/libgo/go/runtime/chanbarrier_test.go
new file mode 100644
index 0000000..770b850
--- /dev/null
+++ b/libgo/go/runtime/chanbarrier_test.go
@@ -0,0 +1,83 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+)
+
+type response struct {
+}
+
+type myError struct {
+}
+
+func (myError) Error() string { return "" }
+
+func doRequest(useSelect bool) (*response, error) {
+ type async struct {
+ resp *response
+ err error
+ }
+ ch := make(chan *async, 0)
+ done := make(chan struct{}, 0)
+
+ if useSelect {
+ go func() {
+ select {
+ case ch <- &async{resp: nil, err: myError{}}:
+ case <-done:
+ }
+ }()
+ } else {
+ go func() {
+ ch <- &async{resp: nil, err: myError{}}
+ }()
+ }
+
+ r := <-ch
+ runtime.Gosched()
+ return r.resp, r.err
+}
+
+func TestChanSendSelectBarrier(t *testing.T) {
+ testChanSendBarrier(true)
+}
+
+func TestChanSendBarrier(t *testing.T) {
+ testChanSendBarrier(false)
+}
+
+func testChanSendBarrier(useSelect bool) {
+ var wg sync.WaitGroup
+ var globalMu sync.Mutex
+ outer := 100
+ inner := 100000
+ if testing.Short() {
+ outer = 10
+ inner = 1000
+ }
+ for i := 0; i < outer; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ var garbage []byte
+ for j := 0; j < inner; j++ {
+ _, err := doRequest(useSelect)
+ _, ok := err.(myError)
+ if !ok {
+ panic(1)
+ }
+ garbage = make([]byte, 1<<10)
+ }
+ globalMu.Lock()
+ global = garbage
+ globalMu.Unlock()
+ }()
+ }
+ wg.Wait()
+}
diff --git a/libgo/go/runtime/compiler.go b/libgo/go/runtime/compiler.go
index 0ed3b18..b04be61 100644
--- a/libgo/go/runtime/compiler.go
+++ b/libgo/go/runtime/compiler.go
@@ -7,7 +7,7 @@ package runtime
// Compiler is the name of the compiler toolchain that built the
// running binary. Known toolchains are:
//
-// gc The 5g/6g/8g compiler suite at code.google.com/p/go.
+// gc Also known as cmd/compile.
// gccgo The gccgo front end, part of the GCC compiler suite.
//
const Compiler = "gccgo"
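Aside (not part of the patch): runtime.Compiler is exported, so the reworded doc comment above is what users of this constant see. Code can branch on it directly:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Prints "gccgo" for gccgo-built binaries, "gc" for cmd/compile.
	fmt.Println("built with:", runtime.Compiler)
}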
diff --git a/libgo/go/runtime/complex.go b/libgo/go/runtime/complex.go
deleted file mode 100644
index ec50f89..0000000
--- a/libgo/go/runtime/complex.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-func complex128div(n complex128, d complex128) complex128 {
- // Special cases as in C99.
- ninf := real(n) == posinf || real(n) == neginf ||
- imag(n) == posinf || imag(n) == neginf
- dinf := real(d) == posinf || real(d) == neginf ||
- imag(d) == posinf || imag(d) == neginf
-
- nnan := !ninf && (real(n) != real(n) || imag(n) != imag(n))
- dnan := !dinf && (real(d) != real(d) || imag(d) != imag(d))
-
- switch {
- case nnan || dnan:
- return complex(nan, nan)
- case ninf && !dinf:
- return complex(posinf, posinf)
- case !ninf && dinf:
- return complex(0, 0)
- case real(d) == 0 && imag(d) == 0:
- if real(n) == 0 && imag(n) == 0 {
- return complex(nan, nan)
- } else {
- return complex(posinf, posinf)
- }
- default:
- // Standard complex arithmetic, factored to avoid unnecessary overflow.
- a := real(d)
- if a < 0 {
- a = -a
- }
- b := imag(d)
- if b < 0 {
- b = -b
- }
- if a <= b {
- ratio := real(d) / imag(d)
- denom := real(d)*ratio + imag(d)
- return complex((real(n)*ratio+imag(n))/denom,
- (imag(n)*ratio-real(n))/denom)
- } else {
- ratio := imag(d) / real(d)
- denom := imag(d)*ratio + real(d)
- return complex((imag(n)*ratio+real(n))/denom,
- (imag(n)-real(n)*ratio)/denom)
- }
- }
-}
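Aside (not part of the patch): the deleted complex128div follows the C99 special cases — NaN operands give NaN, a non-zero numerator over a zero denominator gives an infinite result, and the scaled-ratio form limits overflow. Those cases are visible from ordinary Go code, for example:

package main

import (
	"fmt"
	"math"
)

func main() {
	zero := complex(0, 0) // a complex128 variable, so the division happens at run time

	fmt.Println(complex(1, 1) / zero)                   // infinite result per the special cases above
	fmt.Println(zero / zero)                            // 0/0: NaN result
	fmt.Println(complex(math.NaN(), 0) / complex(1, 2)) // NaN operand propagates
}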
diff --git a/libgo/go/runtime/cpuprof.go b/libgo/go/runtime/cpuprof.go
deleted file mode 100644
index 8b1c1c6..0000000
--- a/libgo/go/runtime/cpuprof.go
+++ /dev/null
@@ -1,425 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// CPU profiling.
-// Based on algorithms and data structures used in
-// http://code.google.com/p/google-perftools/.
-//
-// The main difference between this code and the google-perftools
-// code is that this code is written to allow copying the profile data
-// to an arbitrary io.Writer, while the google-perftools code always
-// writes to an operating system file.
-//
-// The signal handler for the profiling clock tick adds a new stack trace
-// to a hash table tracking counts for recent traces. Most clock ticks
-// hit in the cache. In the event of a cache miss, an entry must be
-// evicted from the hash table, copied to a log that will eventually be
-// written as profile data. The google-perftools code flushed the
-// log itself during the signal handler. This code cannot do that, because
-// the io.Writer might block or need system calls or locks that are not
-// safe to use from within the signal handler. Instead, we split the log
-// into two halves and let the signal handler fill one half while a goroutine
-// is writing out the other half. When the signal handler fills its half, it
-// offers to swap with the goroutine. If the writer is not done with its half,
-// we lose the stack trace for this clock tick (and record that loss).
-// The goroutine interacts with the signal handler by calling getprofile() to
-// get the next log piece to write, implicitly handing back the last log
-// piece it obtained.
-//
-// The state of this dance between the signal handler and the goroutine
-// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine
-// is not using either log half and is waiting (or will soon be waiting) for
-// a new piece by calling notesleep(&p->wait). If the signal handler
-// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait)
-// to wake the goroutine. The value indicates the number of entries in the
-// log half being handed off. The goroutine leaves the non-zero value in
-// place until it has finished processing the log half and then flips the number
-// back to zero. Setting the high bit in handoff means that the profiling is over,
-// and the goroutine is now in charge of flushing the data left in the hash table
-// to the log and returning that data.
-//
-// The handoff field is manipulated using atomic operations.
-// For the most part, the manipulation of handoff is orderly: if handoff == 0
-// then the signal handler owns it and can change it to non-zero.
-// If handoff != 0 then the goroutine owns it and can change it to zero.
-// If that were the end of the story then we would not need to manipulate
-// handoff using atomic operations. The operations are needed, however,
-// in order to let the log closer set the high bit to indicate "EOF" safely
-// in the situation when normally the goroutine "owns" handoff.
-
-package runtime
-
-import "unsafe"
-
-const (
- numBuckets = 1 << 10
- logSize = 1 << 17
- assoc = 4
- maxCPUProfStack = 64
-)
-
-type cpuprofEntry struct {
- count uintptr
- depth uintptr
- stack [maxCPUProfStack]uintptr
-}
-
-type cpuProfile struct {
- on bool // profiling is on
- wait note // goroutine waits here
- count uintptr // tick count
- evicts uintptr // eviction count
- lost uintptr // lost ticks that need to be logged
-
- // Active recent stack traces.
- hash [numBuckets]struct {
- entry [assoc]cpuprofEntry
- }
-
- // Log of traces evicted from hash.
- // Signal handler has filled log[toggle][:nlog].
- // Goroutine is writing log[1-toggle][:handoff].
- log [2][logSize / 2]uintptr
- nlog uintptr
- toggle int32
- handoff uint32
-
- // Writer state.
- // Writer maintains its own toggle to avoid races
- // looking at signal handler's toggle.
- wtoggle uint32
- wholding bool // holding & need to release a log half
- flushing bool // flushing hash table - profile is over
- eodSent bool // special end-of-data record sent; => flushing
-}
-
-var (
- cpuprofLock mutex
- cpuprof *cpuProfile
-
- eod = [3]uintptr{0, 1, 0}
-)
-
-func setcpuprofilerate_m() // proc.c
-
-func setcpuprofilerate(hz int32) {
- g := getg()
- g.m.scalararg[0] = uintptr(hz)
- onM(setcpuprofilerate_m)
-}
-
-// lostProfileData is a no-op function used in profiles
-// to mark the number of profiling stack traces that were
-// discarded due to slow data writers.
-func lostProfileData() {}
-
-// SetCPUProfileRate sets the CPU profiling rate to hz samples per second.
-// If hz <= 0, SetCPUProfileRate turns off profiling.
-// If the profiler is on, the rate cannot be changed without first turning it off.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.cpuprofile flag instead of calling
-// SetCPUProfileRate directly.
-func SetCPUProfileRate(hz int) {
- // Clamp hz to something reasonable.
- if hz < 0 {
- hz = 0
- }
- if hz > 1000000 {
- hz = 1000000
- }
-
- lock(&cpuprofLock)
- if hz > 0 {
- if cpuprof == nil {
- cpuprof = (*cpuProfile)(sysAlloc(unsafe.Sizeof(cpuProfile{}), &memstats.other_sys))
- if cpuprof == nil {
- print("runtime: cpu profiling cannot allocate memory\n")
- unlock(&cpuprofLock)
- return
- }
- }
- if cpuprof.on || cpuprof.handoff != 0 {
- print("runtime: cannot set cpu profile rate until previous profile has finished.\n")
- unlock(&cpuprofLock)
- return
- }
-
- cpuprof.on = true
- // pprof binary header format.
- // http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117
- p := &cpuprof.log[0]
- p[0] = 0 // count for header
- p[1] = 3 // depth for header
- p[2] = 0 // version number
- p[3] = uintptr(1e6 / hz) // period (microseconds)
- p[4] = 0
- cpuprof.nlog = 5
- cpuprof.toggle = 0
- cpuprof.wholding = false
- cpuprof.wtoggle = 0
- cpuprof.flushing = false
- cpuprof.eodSent = false
- noteclear(&cpuprof.wait)
-
- setcpuprofilerate(int32(hz))
- } else if cpuprof != nil && cpuprof.on {
- setcpuprofilerate(0)
- cpuprof.on = false
-
- // Now add is not running anymore, and getprofile owns the entire log.
- // Set the high bit in prof->handoff to tell getprofile.
- for {
- n := cpuprof.handoff
- if n&0x80000000 != 0 {
- print("runtime: setcpuprofile(off) twice\n")
- }
- if cas(&cpuprof.handoff, n, n|0x80000000) {
- if n == 0 {
- // we did the transition from 0 -> nonzero so we wake getprofile
- notewakeup(&cpuprof.wait)
- }
- break
- }
- }
- }
- unlock(&cpuprofLock)
-}
-
-func cpuproftick(pc *uintptr, n int32) {
- if n > maxCPUProfStack {
- n = maxCPUProfStack
- }
- s := (*[maxCPUProfStack]uintptr)(unsafe.Pointer(pc))[:n]
- cpuprof.add(s)
-}
-
-// add adds the stack trace to the profile.
-// It is called from signal handlers and other limited environments
-// and cannot allocate memory or acquire locks that might be
-// held at the time of the signal, nor can it use substantial amounts
-// of stack. It is allowed to call evict.
-func (p *cpuProfile) add(pc []uintptr) {
- // Compute hash.
- h := uintptr(0)
- for _, x := range pc {
- h = h<<8 | (h >> (8 * (unsafe.Sizeof(h) - 1)))
- h += x*31 + x*7 + x*3
- }
- p.count++
-
- // Add to entry count if already present in table.
- b := &p.hash[h%numBuckets]
-Assoc:
- for i := range b.entry {
- e := &b.entry[i]
- if e.depth != uintptr(len(pc)) {
- continue
- }
- for j := range pc {
- if e.stack[j] != pc[j] {
- continue Assoc
- }
- }
- e.count++
- return
- }
-
- // Evict entry with smallest count.
- var e *cpuprofEntry
- for i := range b.entry {
- if e == nil || b.entry[i].count < e.count {
- e = &b.entry[i]
- }
- }
- if e.count > 0 {
- if !p.evict(e) {
- // Could not evict entry. Record lost stack.
- p.lost++
- return
- }
- p.evicts++
- }
-
- // Reuse the newly evicted entry.
- e.depth = uintptr(len(pc))
- e.count = 1
- copy(e.stack[:], pc)
-}
-
-// evict copies the given entry's data into the log, so that
-// the entry can be reused. evict is called from add, which
-// is called from the profiling signal handler, so it must not
-// allocate memory or block. It is safe to call flushlog.
-// evict returns true if the entry was copied to the log,
-// false if there was no room available.
-func (p *cpuProfile) evict(e *cpuprofEntry) bool {
- d := e.depth
- nslot := d + 2
- log := &p.log[p.toggle]
- if p.nlog+nslot > uintptr(len(p.log[0])) {
- if !p.flushlog() {
- return false
- }
- log = &p.log[p.toggle]
- }
-
- q := p.nlog
- log[q] = e.count
- q++
- log[q] = d
- q++
- copy(log[q:], e.stack[:d])
- q += d
- p.nlog = q
- e.count = 0
- return true
-}
-
-// flushlog tries to flush the current log and switch to the other one.
-// flushlog is called from evict, called from add, called from the signal handler,
-// so it cannot allocate memory or block. It can try to swap logs with
-// the writing goroutine, as explained in the comment at the top of this file.
-func (p *cpuProfile) flushlog() bool {
- if !cas(&p.handoff, 0, uint32(p.nlog)) {
- return false
- }
- notewakeup(&p.wait)
-
- p.toggle = 1 - p.toggle
- log := &p.log[p.toggle]
- q := uintptr(0)
- if p.lost > 0 {
- lostPC := funcPC(lostProfileData)
- log[0] = p.lost
- log[1] = 1
- log[2] = lostPC
- q = 3
- p.lost = 0
- }
- p.nlog = q
- return true
-}
-
-// getprofile blocks until the next block of profiling data is available
-// and returns it as a []byte. It is called from the writing goroutine.
-func (p *cpuProfile) getprofile() []byte {
- if p == nil {
- return nil
- }
-
- if p.wholding {
- // Release previous log to signal handling side.
- // Loop because we are racing against SetCPUProfileRate(0).
- for {
- n := p.handoff
- if n == 0 {
- print("runtime: phase error during cpu profile handoff\n")
- return nil
- }
- if n&0x80000000 != 0 {
- p.wtoggle = 1 - p.wtoggle
- p.wholding = false
- p.flushing = true
- goto Flush
- }
- if cas(&p.handoff, n, 0) {
- break
- }
- }
- p.wtoggle = 1 - p.wtoggle
- p.wholding = false
- }
-
- if p.flushing {
- goto Flush
- }
-
- if !p.on && p.handoff == 0 {
- return nil
- }
-
- // Wait for new log.
- notetsleepg(&p.wait, -1)
- noteclear(&p.wait)
-
- switch n := p.handoff; {
- case n == 0:
- print("runtime: phase error during cpu profile wait\n")
- return nil
- case n == 0x80000000:
- p.flushing = true
- goto Flush
- default:
- n &^= 0x80000000
-
- // Return new log to caller.
- p.wholding = true
-
- return uintptrBytes(p.log[p.wtoggle][:n])
- }
-
- // In flush mode.
- // Add is no longer being called. We own the log.
- // Also, p->handoff is non-zero, so flushlog will return false.
- // Evict the hash table into the log and return it.
-Flush:
- for i := range p.hash {
- b := &p.hash[i]
- for j := range b.entry {
- e := &b.entry[j]
- if e.count > 0 && !p.evict(e) {
- // Filled the log. Stop the loop and return what we've got.
- break Flush
- }
- }
- }
-
- // Return pending log data.
- if p.nlog > 0 {
- // Note that we're using toggle now, not wtoggle,
- // because we're working on the log directly.
- n := p.nlog
- p.nlog = 0
- return uintptrBytes(p.log[p.toggle][:n])
- }
-
- // Made it through the table without finding anything to log.
- if !p.eodSent {
- // We may not have space to append this to the partial log buf,
- // so we always return a new slice for the end-of-data marker.
- p.eodSent = true
- return uintptrBytes(eod[:])
- }
-
- // Finally done. Clean up and return nil.
- p.flushing = false
- if !cas(&p.handoff, p.handoff, 0) {
- print("runtime: profile flush racing with something\n")
- }
- return nil
-}
-
-func uintptrBytes(p []uintptr) (ret []byte) {
- pp := (*sliceStruct)(unsafe.Pointer(&p))
- rp := (*sliceStruct)(unsafe.Pointer(&ret))
-
- rp.array = pp.array
- rp.len = pp.len * int(unsafe.Sizeof(p[0]))
- rp.cap = rp.len
-
- return
-}
-
-// CPUProfile returns the next chunk of binary CPU profiling stack trace data,
-// blocking until data is available. If profiling is turned off and all the profile
-// data accumulated while it was on has been returned, CPUProfile returns nil.
-// The caller must save the returned data before calling CPUProfile again.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.cpuprofile flag instead of calling
-// CPUProfile directly.
-func CPUProfile() []byte {
- return cpuprof.getprofile()
-}
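Aside (not part of the patch): the deleted CPUProfile documentation steers callers to the runtime/pprof package rather than the raw handoff protocol described above. Typical usage looks like this sketch (the output filename is made up for the example):

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func busyWork() {
	s := 0
	for i := 0; i < 100000000; i++ {
		s += i
	}
	_ = s
}

func main() {
	f, err := os.Create("cpu.prof") // hypothetical output file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// StartCPUProfile drives SetCPUProfileRate / CPUProfile machinery
	// of the sort the deleted file implemented.
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	busyWork()
}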
diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go
index 29f90fa..2e65e4c 100644
--- a/libgo/go/runtime/crash_cgo_test.go
+++ b/libgo/go/runtime/crash_cgo_test.go
@@ -36,6 +36,20 @@ func TestCgoTraceback(t *testing.T) {
}
}
+func TestCgoCallbackGC(t *testing.T) {
+ if runtime.GOOS == "plan9" || runtime.GOOS == "windows" {
+ t.Skipf("no pthreads on %s", runtime.GOOS)
+ }
+ if testing.Short() && runtime.GOOS == "dragonfly" {
+ t.Skip("see golang.org/issue/11990")
+ }
+ got := executeTest(t, cgoCallbackGCSource, nil)
+ want := "OK\n"
+ if got != want {
+ t.Fatalf("expected %q, but got %q", want, got)
+ }
+}
+
func TestCgoExternalThreadPanic(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skipf("no pthreads on %s", runtime.GOOS)
@@ -57,17 +71,24 @@ func TestCgoExternalThreadSIGPROF(t *testing.T) {
case "plan9", "windows":
t.Skipf("no pthreads on %s", runtime.GOOS)
case "darwin":
- // static constructor needs external linking, but we don't support
- // external linking on OS X 10.6.
- out, err := exec.Command("uname", "-r").Output()
- if err != nil {
- t.Fatalf("uname -r failed: %v", err)
- }
- // OS X 10.6 == Darwin 10.x
- if strings.HasPrefix(string(out), "10.") {
- t.Skipf("no external linking on OS X 10.6")
+ if runtime.GOARCH != "arm" && runtime.GOARCH != "arm64" {
+ // static constructor needs external linking, but we don't support
+ // external linking on OS X 10.6.
+ out, err := exec.Command("uname", "-r").Output()
+ if err != nil {
+ t.Fatalf("uname -r failed: %v", err)
+ }
+ // OS X 10.6 == Darwin 10.x
+ if strings.HasPrefix(string(out), "10.") {
+ t.Skipf("no external linking on OS X 10.6")
+ }
}
}
+ if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" {
+ // TODO(austin) External linking not implemented on
+ // ppc64 (issue #8912)
+ t.Skipf("no external linking on ppc64")
+ }
got := executeTest(t, cgoExternalThreadSIGPROFSource, nil)
want := "OK\n"
if got != want {
@@ -75,6 +96,31 @@ func TestCgoExternalThreadSIGPROF(t *testing.T) {
}
}
+func TestCgoExternalThreadSignal(t *testing.T) {
+ // issue 10139
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skipf("no pthreads on %s", runtime.GOOS)
+ }
+ got := executeTest(t, cgoExternalThreadSignalSource, nil)
+ want := "OK\n"
+ if got != want {
+ t.Fatalf("expected %q, but got %q", want, got)
+ }
+}
+
+func TestCgoDLLImports(t *testing.T) {
+ // test issue 9356
+ if runtime.GOOS != "windows" {
+ t.Skip("skipping windows specific test")
+ }
+ got := executeTest(t, cgoDLLImportsMainSource, nil, "a/a.go", cgoDLLImportsPkgSource)
+ want := "OK\n"
+ if got != want {
+ t.Fatalf("expected %q, but got %v", want, got)
+ }
+}
+
const cgoSignalDeadlockSource = `
package main
@@ -159,6 +205,83 @@ func main() {
}
`
+const cgoCallbackGCSource = `
+package main
+
+import "runtime"
+
+/*
+#include <pthread.h>
+
+void go_callback();
+
+static void *thr(void *arg) {
+ go_callback();
+ return 0;
+}
+
+static void foo() {
+ pthread_t th;
+ pthread_create(&th, 0, thr, 0);
+ pthread_join(th, 0);
+}
+*/
+import "C"
+import "fmt"
+
+//export go_callback
+func go_callback() {
+ runtime.GC()
+ grow()
+ runtime.GC()
+}
+
+var cnt int
+
+func grow() {
+ x := 10000
+ sum := 0
+ if grow1(&x, &sum) == 0 {
+ panic("bad")
+ }
+}
+
+func grow1(x, sum *int) int {
+ if *x == 0 {
+ return *sum + 1
+ }
+ *x--
+ sum1 := *sum + *x
+ return grow1(x, &sum1)
+}
+
+func main() {
+ const P = 100
+ done := make(chan bool)
+ // allocate a bunch of stack frames and spray them with pointers
+ for i := 0; i < P; i++ {
+ go func() {
+ grow()
+ done <- true
+ }()
+ }
+ for i := 0; i < P; i++ {
+ <-done
+ }
+ // now give these stack frames to cgo callbacks
+ for i := 0; i < P; i++ {
+ go func() {
+ C.foo()
+ done <- true
+ }()
+ }
+ for i := 0; i < P; i++ {
+ <-done
+ }
+ fmt.Printf("OK\n")
+}
+`
+
const cgoExternalThreadPanicSource = `
package main
@@ -254,7 +377,7 @@ import (
func main() {
// This test intends to test that sending SIGPROF to foreign threads
// before we make any cgo call will not abort the whole process, so
- // we cannot make any cgo call here. See http://golang.org/issue/9456.
+ // we cannot make any cgo call here. See https://golang.org/issue/9456.
atomic.StoreInt32((*int32)(unsafe.Pointer(&C.spinlock)), 1)
for atomic.LoadInt32((*int32)(unsafe.Pointer(&C.spinlock))) == 1 {
runtime.Gosched()
@@ -262,3 +385,97 @@ func main() {
println("OK")
}
`
+
+const cgoExternalThreadSignalSource = `
+package main
+
+/*
+#include <pthread.h>
+
+void **nullptr;
+
+void *crash(void *p) {
+ *nullptr = p;
+ return 0;
+}
+
+int start_crashing_thread(void) {
+ pthread_t tid;
+ return pthread_create(&tid, 0, crash, 0);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "time"
+)
+
+func main() {
+ if len(os.Args) > 1 && os.Args[1] == "crash" {
+ i := C.start_crashing_thread()
+ if i != 0 {
+ fmt.Println("pthread_create failed:", i)
+ // Exit with 0 because parent expects us to crash.
+ return
+ }
+
+ // We should crash immediately, but give it plenty of
+ // time before failing (by exiting 0) in case we are
+ // running on a slow system.
+ time.Sleep(5 * time.Second)
+ return
+ }
+
+ out, err := exec.Command(os.Args[0], "crash").CombinedOutput()
+ if err == nil {
+ fmt.Println("C signal did not crash as expected\n")
+ fmt.Printf("%s\n", out)
+ os.Exit(1)
+ }
+
+ fmt.Println("OK")
+}
+`
+
+const cgoDLLImportsMainSource = `
+package main
+
+/*
+#include <windows.h>
+
+DWORD getthread() {
+ return GetCurrentThreadId();
+}
+*/
+import "C"
+
+import "./a"
+
+func main() {
+ C.getthread()
+ a.GetThread()
+ println("OK")
+}
+`
+
+const cgoDLLImportsPkgSource = `
+package a
+
+/*
+#cgo CFLAGS: -mnop-fun-dllimport
+
+#include <windows.h>
+
+DWORD agetthread() {
+ return GetCurrentThreadId();
+}
+*/
+import "C"
+
+func GetThread() uint32 {
+ return uint32(C.agetthread())
+}
+`
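The go_callback hook in cgoCallbackGCSource above follows the standard cgo //export shape: declare the Go function in the C preamble, call it from C, and mark the Go definition with //export. A minimal standalone sketch (illustrative names, requires cgo; when //export is used the preamble may only contain declarations and static definitions) is:

package main

/*
void goHello(void);

static void callIntoGo(void) {
	goHello();
}
*/
import "C"

import "fmt"

//export goHello
func goHello() {
	fmt.Println("called back from C")
}

func main() {
	C.callIntoGo()
}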
diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go
index 7e8a2e4..8efce4d 100644
--- a/libgo/go/runtime/crash_test.go
+++ b/libgo/go/runtime/crash_test.go
@@ -5,38 +5,41 @@
package runtime_test
import (
+ "fmt"
+ "internal/testenv"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
+ "regexp"
"runtime"
"strings"
+ "sync"
"testing"
"text/template"
)
-// testEnv excludes GODEBUG from the environment
-// to prevent its output from breaking tests that
-// are trying to parse other command output.
func testEnv(cmd *exec.Cmd) *exec.Cmd {
if cmd.Env != nil {
panic("environment already set")
}
for _, env := range os.Environ() {
+ // Exclude GODEBUG from the environment to prevent its output
+ // from breaking tests that are trying to parse other command output.
if strings.HasPrefix(env, "GODEBUG=") {
continue
}
+ // Exclude GOTRACEBACK for the same reason.
+ if strings.HasPrefix(env, "GOTRACEBACK=") {
+ continue
+ }
cmd.Env = append(cmd.Env, env)
}
return cmd
}
func executeTest(t *testing.T, templ string, data interface{}, extra ...string) string {
- t.Skip("gccgo does not have a go command")
- switch runtime.GOOS {
- case "android", "nacl":
- t.Skipf("skipping on %s", runtime.GOOS)
- }
+ testenv.MustHaveGoBuild(t)
checkStaleRuntime(t)
@@ -63,7 +66,14 @@ func executeTest(t *testing.T, templ string, data interface{}, extra ...string)
}
for i := 0; i < len(extra); i += 2 {
- if err := ioutil.WriteFile(filepath.Join(dir, extra[i]), []byte(extra[i+1]), 0666); err != nil {
+ fname := extra[i]
+ contents := extra[i+1]
+ if d, _ := filepath.Split(fname); d != "" {
+ if err := os.Mkdir(filepath.Join(dir, d), 0755); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := ioutil.WriteFile(filepath.Join(dir, fname), []byte(contents), 0666); err != nil {
t.Fatal(err)
}
}
@@ -79,14 +89,25 @@ func executeTest(t *testing.T, templ string, data interface{}, extra ...string)
return string(got)
}
+var (
+ staleRuntimeOnce sync.Once // guards init of staleRuntimeErr
+ staleRuntimeErr error
+)
+
func checkStaleRuntime(t *testing.T) {
- // 'go run' uses the installed copy of runtime.a, which may be out of date.
- out, err := testEnv(exec.Command("go", "list", "-f", "{{.Stale}}", "runtime")).CombinedOutput()
- if err != nil {
- t.Fatalf("failed to execute 'go list': %v\n%v", err, string(out))
- }
- if string(out) != "false\n" {
- t.Fatalf("Stale runtime.a. Run 'go install runtime'.")
+ staleRuntimeOnce.Do(func() {
+ // 'go run' uses the installed copy of runtime.a, which may be out of date.
+ out, err := testEnv(exec.Command("go", "list", "-f", "{{.Stale}}", "runtime")).CombinedOutput()
+ if err != nil {
+ staleRuntimeErr = fmt.Errorf("failed to execute 'go list': %v\n%v", err, string(out))
+ return
+ }
+ if string(out) != "false\n" {
+ staleRuntimeErr = fmt.Errorf("Stale runtime.a. Run 'go install runtime'.")
+ }
+ })
+ if staleRuntimeErr != nil {
+ t.Fatal(staleRuntimeErr)
}
}
@@ -205,6 +226,14 @@ func TestMainGoroutineId(t *testing.T) {
}
}
+func TestNoHelperGoroutines(t *testing.T) {
+ output := executeTest(t, noHelperGoroutinesSource, nil)
+ matches := regexp.MustCompile(`goroutine [0-9]+ \[`).FindAllStringSubmatch(output, -1)
+ if len(matches) != 1 || matches[0][0] != "goroutine 1 [" {
+ t.Fatalf("want to see only goroutine 1, see:\n%s", output)
+ }
+}
+
func TestBreakpoint(t *testing.T) {
output := executeTest(t, breakpointSource, nil)
want := "runtime.Breakpoint()"
@@ -419,6 +448,22 @@ func main() {
}
`
+const noHelperGoroutinesSource = `
+package main
+import (
+ "runtime"
+ "time"
+)
+func init() {
+ i := 0
+ runtime.SetFinalizer(&i, func(p *int) {})
+ time.AfterFunc(time.Hour, func() {})
+ panic("oops")
+}
+func main() {
+}
+`
+
const breakpointSource = `
package main
import "runtime"
@@ -514,3 +559,31 @@ func TestRecoverBeforePanicAfterGoexit(t *testing.T) {
}()
runtime.Goexit()
}
+
+func TestNetpollDeadlock(t *testing.T) {
+ output := executeTest(t, netpollDeadlockSource, nil)
+ want := "done\n"
+ if !strings.HasSuffix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
+ }
+}
+
+const netpollDeadlockSource = `
+package main
+import (
+ "fmt"
+ "net"
+)
+func init() {
+ fmt.Println("dialing")
+ c, err := net.Dial("tcp", "localhost:14356")
+ if err == nil {
+ c.Close()
+ } else {
+ fmt.Println("error: ", err)
+ }
+}
+func main() {
+ fmt.Println("done")
+}
+`
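The checkStaleRuntime rewrite above caches a one-time check behind a sync.Once so that repeated callers share the result. The same shape, reduced to a minimal sketch with a hypothetical expensiveCheck, is:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var (
	checkOnce sync.Once // guards init of checkErr
	checkErr  error
)

// expensiveCheck stands in for work that should run at most once,
// such as shelling out to an external command.
func expensiveCheck() error {
	return errors.New("something is stale")
}

func check() error {
	checkOnce.Do(func() {
		checkErr = expensiveCheck()
	})
	return checkErr
}

func main() {
	// Both calls return the cached result; expensiveCheck runs once.
	fmt.Println(check())
	fmt.Println(check())
}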
diff --git a/libgo/go/runtime/crash_unix_test.go b/libgo/go/runtime/crash_unix_test.go
new file mode 100644
index 0000000..b925d02
--- /dev/null
+++ b/libgo/go/runtime/crash_unix_test.go
@@ -0,0 +1,135 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package runtime_test
+
+import (
+ "bytes"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "syscall"
+ "testing"
+)
+
+func TestCrashDumpsAllThreads(t *testing.T) {
+ switch runtime.GOOS {
+ case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris":
+ default:
+ t.Skipf("skipping; not supported on %v", runtime.GOOS)
+ }
+
+ // We don't use executeTest because we need to kill the
+ // program while it is running.
+
+ testenv.MustHaveGoBuild(t)
+
+ checkStaleRuntime(t)
+
+ dir, err := ioutil.TempDir("", "go-build")
+ if err != nil {
+ t.Fatalf("failed to create temp directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ if err := ioutil.WriteFile(filepath.Join(dir, "main.go"), []byte(crashDumpsAllThreadsSource), 0666); err != nil {
+ t.Fatalf("failed to create Go file: %v", err)
+ }
+
+ cmd := exec.Command("go", "build", "-o", "a.exe")
+ cmd.Dir = dir
+ out, err := testEnv(cmd).CombinedOutput()
+ if err != nil {
+ t.Fatalf("building source: %v\n%s", err, out)
+ }
+
+ cmd = exec.Command(filepath.Join(dir, "a.exe"))
+ cmd = testEnv(cmd)
+ cmd.Env = append(cmd.Env, "GOTRACEBACK=crash")
+ var outbuf bytes.Buffer
+ cmd.Stdout = &outbuf
+ cmd.Stderr = &outbuf
+
+ rp, wp, err := os.Pipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ cmd.ExtraFiles = []*os.File{wp}
+
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("starting program: %v", err)
+ }
+
+ if err := wp.Close(); err != nil {
+ t.Logf("closing write pipe: %v", err)
+ }
+ if _, err := rp.Read(make([]byte, 1)); err != nil {
+ t.Fatalf("reading from pipe: %v", err)
+ }
+
+ if err := cmd.Process.Signal(syscall.SIGQUIT); err != nil {
+ t.Fatalf("signal: %v", err)
+ }
+
+ // No point in checking the error return from Wait--we expect
+ // it to fail.
+ cmd.Wait()
+
+ // We want to see a stack trace for each thread.
+ // Before https://golang.org/cl/2811 running threads would say
+ // "goroutine running on other thread; stack unavailable".
+ out = outbuf.Bytes()
+ n := bytes.Count(out, []byte("main.loop("))
+ if n != 4 {
+ t.Errorf("found %d instances of main.loop; expected 4", n)
+ t.Logf("%s", out)
+ }
+}
+
+const crashDumpsAllThreadsSource = `
+package main
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+)
+
+func main() {
+ const count = 4
+ runtime.GOMAXPROCS(count + 1)
+
+ chans := make([]chan bool, count)
+ for i := range chans {
+ chans[i] = make(chan bool)
+ go loop(i, chans[i])
+ }
+
+ // Wait for all the goroutines to start executing.
+ for _, c := range chans {
+ <-c
+ }
+
+ // Tell our parent that all the goroutines are executing.
+ if _, err := os.NewFile(3, "pipe").WriteString("x"); err != nil {
+ fmt.Fprintf(os.Stderr, "write to pipe failed: %v\n", err)
+ os.Exit(2)
+ }
+
+ select {}
+}
+
+func loop(i int, c chan bool) {
+ close(c)
+ for {
+ for j := 0; j < 0x7fffffff; j++ {
+ }
+ }
+}
+`
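TestCrashDumpsAllThreads above hands the child an extra pipe (file descriptor 3, via ExtraFiles) so the parent can block until every goroutine is running before it sends SIGQUIT. A stripped-down sketch of the parent side of that handshake (the child binary path is illustrative) is:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	rp, wp, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	// ExtraFiles[0] becomes file descriptor 3 in the child.
	cmd := exec.Command("./child") // illustrative child binary
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.ExtraFiles = []*os.File{wp}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	wp.Close() // keep only the child's copy of the write end open
	// Block until the child writes a byte to fd 3 to signal readiness.
	if _, err := rp.Read(make([]byte, 1)); err != nil {
		panic(err)
	}
	fmt.Println("child is ready; safe to signal or inspect it")
	cmd.Wait()
}

The child side is the same as in crashDumpsAllThreadsSource: it calls os.NewFile(3, "pipe").WriteString("x") once it is ready.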
diff --git a/libgo/go/runtime/debug/garbage.go b/libgo/go/runtime/debug/garbage.go
index edb3643..c3363f9 100644
--- a/libgo/go/runtime/debug/garbage.go
+++ b/libgo/go/runtime/debug/garbage.go
@@ -149,5 +149,5 @@ func SetPanicOnFault(enabled bool) bool
// WriteHeapDump writes a description of the heap and the objects in
// it to the given file descriptor.
-// The heap dump format is defined at http://golang.org/s/go13heapdump.
+// The heap dump format is defined at https://golang.org/s/go13heapdump.
func WriteHeapDump(fd uintptr)
diff --git a/libgo/go/runtime/debug/garbage_test.go b/libgo/go/runtime/debug/garbage_test.go
index 149bafc..13e1845 100644
--- a/libgo/go/runtime/debug/garbage_test.go
+++ b/libgo/go/runtime/debug/garbage_test.go
@@ -75,6 +75,10 @@ func TestReadGCStats(t *testing.T) {
var big = make([]byte, 1<<20)
func TestFreeOSMemory(t *testing.T) {
+ if runtime.GOARCH == "arm64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" ||
+ runtime.GOOS == "nacl" {
+ t.Skip("issue 9993; scavenger temporarily disabled on systems with physical pages larger than logical pages")
+ }
var ms1, ms2 runtime.MemStats
if big == nil {
diff --git a/libgo/go/runtime/debug/heapdump_test.go b/libgo/go/runtime/debug/heapdump_test.go
index 9201901..cb2f2f0 100644
--- a/libgo/go/runtime/debug/heapdump_test.go
+++ b/libgo/go/runtime/debug/heapdump_test.go
@@ -31,3 +31,39 @@ func TestWriteHeapDumpNonempty(t *testing.T) {
t.Fatalf("Heap dump size %d bytes, expected at least %d bytes", size, minSize)
}
}
+
+type Obj struct {
+ x, y int
+}
+
+func objfin(x *Obj) {
+ println("finalized", x)
+}
+
+func TestWriteHeapDumpFinalizers(t *testing.T) {
+ if runtime.GOOS == "nacl" {
+ t.Skip("WriteHeapDump is not available on NaCl.")
+ }
+ f, err := ioutil.TempFile("", "heapdumptest")
+ if err != nil {
+ t.Fatalf("TempFile failed: %v", err)
+ }
+ defer os.Remove(f.Name())
+ defer f.Close()
+
+ // bug 9172: WriteHeapDump couldn't handle more than one finalizer
+ println("allocating objects")
+ x := &Obj{}
+ runtime.SetFinalizer(x, objfin)
+ y := &Obj{}
+ runtime.SetFinalizer(y, objfin)
+
+ // Trigger collection of x and y, queueing of their finalizers.
+ println("starting gc")
+ runtime.GC()
+
+ // Make sure WriteHeapDump doesn't fail with multiple queued finalizers.
+ println("starting dump")
+ WriteHeapDump(f.Fd())
+ println("done dump")
+}
diff --git a/libgo/go/runtime/debug/stack.go b/libgo/go/runtime/debug/stack.go
index c29b0a2..ab12bff 100644
--- a/libgo/go/runtime/debug/stack.go
+++ b/libgo/go/runtime/debug/stack.go
@@ -31,7 +31,7 @@ func PrintStack() {
// then attempts to discover, for Go functions, the calling function or
// method and the text of the line containing the invocation.
//
-// This function is deprecated. Use package runtime's Stack instead.
+// Deprecated: Use package runtime's Stack instead.
func Stack() []byte {
return stack()
}
diff --git a/libgo/go/runtime/env_posix.go b/libgo/go/runtime/env_posix.go
deleted file mode 100644
index 8b1dbb7..0000000
--- a/libgo/go/runtime/env_posix.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
-
-package runtime
-
-import "unsafe"
-
-func environ() []string
-
-func getenv(s *byte) *byte {
- val := gogetenv(gostringnocopy(s))
- if val == "" {
- return nil
- }
- // Strings found in environment are NUL-terminated.
- return &bytes(val)[0]
-}
-
-func gogetenv(key string) string {
- env := environ()
- if env == nil {
- gothrow("getenv before env init")
- }
- for _, s := range environ() {
- if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key {
- return s[len(key)+1:]
- }
- }
- return ""
-}
-
-//extern setenv
-func _cgo_setenv(unsafe.Pointer, unsafe.Pointer, int32)
-
-//extern unsetenv
-func _cgo_unsetenv(unsafe.Pointer)
-
-// Update the C environment if cgo is loaded.
-// Called from syscall.Setenv.
-func syscall_setenv_c(k string, v string) {
- _cgo_setenv(cstring(k), cstring(v), 1)
-}
-
-// Update the C environment if cgo is loaded.
-// Called from syscall.unsetenv.
-func syscall_unsetenv_c(k string) {
- _cgo_unsetenv(cstring(k))
-}
-
-func cstring(s string) unsafe.Pointer {
- p := make([]byte, len(s)+1)
- sp := (*_string)(unsafe.Pointer(&s))
- memmove(unsafe.Pointer(&p[0]), unsafe.Pointer(sp.str), uintptr(len(s)))
- return unsafe.Pointer(&p[0])
-}
diff --git a/libgo/go/runtime/env_test.go b/libgo/go/runtime/env_test.go
new file mode 100644
index 0000000..2399e46
--- /dev/null
+++ b/libgo/go/runtime/env_test.go
@@ -0,0 +1,47 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "runtime"
+ "syscall"
+ "testing"
+)
+
+func TestFixedGOROOT(t *testing.T) {
+ if runtime.GOOS == "plan9" {
+ t.Skipf("skipping plan9, it is inconsistent by allowing GOROOT to be updated by Setenv")
+ }
+
+ // Restore both the real GOROOT environment variable, and runtime's copies:
+ if orig, ok := syscall.Getenv("GOROOT"); ok {
+ defer syscall.Setenv("GOROOT", orig)
+ } else {
+ defer syscall.Unsetenv("GOROOT")
+ }
+ envs := runtime.Envs()
+ oldenvs := append([]string{}, envs...)
+ defer runtime.SetEnvs(oldenvs)
+
+ // attempt to reuse existing envs backing array.
+ want := runtime.GOROOT()
+ runtime.SetEnvs(append(envs[:0], "GOROOT="+want))
+
+ if got := runtime.GOROOT(); got != want {
+ t.Errorf(`initial runtime.GOROOT()=%q, want %q`, got, want)
+ }
+ if err := syscall.Setenv("GOROOT", "/os"); err != nil {
+ t.Fatal(err)
+ }
+ if got := runtime.GOROOT(); got != want {
+ t.Errorf(`after setenv runtime.GOROOT()=%q, want %q`, got, want)
+ }
+ if err := syscall.Unsetenv("GOROOT"); err != nil {
+ t.Fatal(err)
+ }
+ if got := runtime.GOROOT(); got != want {
+ t.Errorf(`after unsetenv runtime.GOROOT()=%q, want %q`, got, want)
+ }
+}
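TestFixedGOROOT restores both the process environment and the runtime's cached copy with defers before mutating GOROOT. The basic save-and-restore idiom for an environment variable in a test, shown here with a hypothetical TESTVAR, is:

package example

import (
	"os"
	"testing"
)

func TestWithTempEnv(t *testing.T) {
	// Save the current value (and whether it was set at all),
	// and restore it when the test finishes.
	if orig, ok := os.LookupEnv("TESTVAR"); ok {
		defer os.Setenv("TESTVAR", orig)
	} else {
		defer os.Unsetenv("TESTVAR")
	}

	if err := os.Setenv("TESTVAR", "value-for-this-test"); err != nil {
		t.Fatal(err)
	}
	// ... exercise code that reads TESTVAR ...
}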
diff --git a/libgo/go/runtime/error.go b/libgo/go/runtime/error.go
index d759a54..c4621b6 100644
--- a/libgo/go/runtime/error.go
+++ b/libgo/go/runtime/error.go
@@ -9,9 +9,9 @@ type Error interface {
error
// RuntimeError is a no-op function but
- // serves to distinguish types that are runtime
+ // serves to distinguish types that are run time
// errors from ordinary errors: a type is a
- // runtime error if it has a RuntimeError method.
+ // run time error if it has a RuntimeError method.
RuntimeError()
}
diff --git a/libgo/go/runtime/arch_386.go b/libgo/go/runtime/export_arm_test.go
index 79d38c7..446d264 100644
--- a/libgo/go/runtime/arch_386.go
+++ b/libgo/go/runtime/export_arm_test.go
@@ -1,8 +1,9 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Export guts for testing.
+
package runtime
-type uintreg uint32
-type intptr int32 // TODO(rsc): remove
+var Usplit = usplit
diff --git a/libgo/go/runtime/arch_amd64p32.go b/libgo/go/runtime/export_linux_test.go
index 5c636ae..37cf164 100644
--- a/libgo/go/runtime/arch_amd64p32.go
+++ b/libgo/go/runtime/export_linux_test.go
@@ -1,8 +1,9 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Export guts for testing.
+
package runtime
-type uintreg uint64
-type intptr int32 // TODO(rsc): remove
+//var NewOSProc0 = newosproc0
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 165bebf..8782914 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -6,26 +6,33 @@
package runtime
-var Fadd64 = fadd64
-var Fsub64 = fsub64
-var Fmul64 = fmul64
-var Fdiv64 = fdiv64
-var F64to32 = f64to32
-var F32to64 = f32to64
-var Fcmp64 = fcmp64
-var Fintto64 = fintto64
-var F64toint = f64toint
-
-func entersyscall()
-func exitsyscall()
+import "unsafe"
+
+//var Fadd64 = fadd64
+//var Fsub64 = fsub64
+//var Fmul64 = fmul64
+//var Fdiv64 = fdiv64
+//var F64to32 = f64to32
+//var F32to64 = f32to64
+//var Fcmp64 = fcmp64
+//var Fintto64 = fintto64
+//var F64toint = f64toint
+//var Sqrt = sqrt
+
+func entersyscall(int32)
+func exitsyscall(int32)
func golockedOSThread() bool
var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = golockedOSThread
+// var Xadduintptr = xadduintptr
+
+// var FuncPC = funcPC
+
type LFNode struct {
- Next *LFNode
+ Next uint64
Pushcnt uintptr
}
@@ -36,18 +43,16 @@ var LFStackPush = lfstackpush_go
var LFStackPop = lfstackpop_go
type ParFor struct {
- body *byte
- done uint32
- Nthr uint32
- nthrmax uint32
- thrseq uint32
- Cnt uint32
- Ctx *byte
- wait bool
+ body func(*ParFor, uint32)
+ done uint32
+ Nthr uint32
+ thrseq uint32
+ Cnt uint32
+ wait bool
}
func newParFor(nthrmax uint32) *ParFor
-func parForSetup(desc *ParFor, nthr, n uint32, ctx *byte, wait bool, body func(*ParFor, uint32))
+func parForSetup(desc *ParFor, nthr, n uint32, wait bool, body func(*ParFor, uint32))
func parForDo(desc *ParFor)
func parForIters(desc *ParFor, tid uintptr) (uintptr, uintptr)
@@ -60,31 +65,110 @@ func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
return uint32(begin), uint32(end)
}
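For context, the ParFor test hooks are normally driven the way the upstream parfor test does it. A sketch, assuming NewParFor, ParForSetup and ParForDo wrappers are exported alongside ParForIters above (only the lowercase declarations are visible in this hunk), is:

package runtime_test

import (
	"runtime"
	"testing"
)

// Sketch only: assumes runtime.NewParFor, runtime.ParForSetup and
// runtime.ParForDo exist as exported test hooks matching the
// lowercase declarations above.
func TestParForSketch(t *testing.T) {
	const P = 1  // worker threads
	const N = 64 // iterations
	data := make([]uint64, N)
	for i := uint64(0); i < N; i++ {
		data[i] = i
	}
	desc := runtime.NewParFor(P)
	runtime.ParForSetup(desc, P, N, true, func(desc *runtime.ParFor, i uint32) {
		data[i] = data[i]*data[i] + 1
	})
	runtime.ParForDo(desc)
	for i := uint64(0); i < N; i++ {
		if want := i*i + 1; data[i] != want {
			t.Fatalf("data[%d] = %d, want %d", i, data[i], want)
		}
	}
}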
-func testSchedLocalQueue()
-func testSchedLocalQueueSteal()
+func GCMask(x interface{}) (ret []byte) {
+ return nil
+}
+
+//func testSchedLocalQueue()
+//func testSchedLocalQueueSteal()
+//
+//func RunSchedLocalQueueTest() {
+// testSchedLocalQueue()
+//}
+//
+//func RunSchedLocalQueueStealTest() {
+// testSchedLocalQueueSteal()
+//}
+
+//var StringHash = stringHash
+//var BytesHash = bytesHash
+//var Int32Hash = int32Hash
+//var Int64Hash = int64Hash
+//var EfaceHash = efaceHash
+//var IfaceHash = ifaceHash
+//var MemclrBytes = memclrBytes
+
+// var HashLoad = &hashLoad
+
+// entry point for testing
+//func GostringW(w []uint16) (s string) {
+// s = gostringw(&w[0])
+// return
+//}
+
+//var Gostringnocopy = gostringnocopy
+//var Maxstring = &maxstring
+
+//type Uintreg uintreg
+
+//extern __go_open
+func open(path *byte, mode int32, perm int32) int32
+
+func Open(path *byte, mode int32, perm int32) int32 {
+ return open(path, mode, perm)
+}
+
+//extern close
+func close(int32) int32
-var TestSchedLocalQueue1 = testSchedLocalQueue
-var TestSchedLocalQueueSteal1 = testSchedLocalQueueSteal
+func Close(fd int32) int32 {
+ return close(fd)
+}
-// func haveGoodHash() bool
-// func stringHash(s string, seed uintptr) uintptr
-// func bytesHash(b []byte, seed uintptr) uintptr
-// func int32Hash(i uint32, seed uintptr) uintptr
-// func int64Hash(i uint64, seed uintptr) uintptr
+//extern read
+func read(fd int32, buf unsafe.Pointer, size int32) int32
-// var HaveGoodHash = haveGoodHash
-// var StringHash = stringHash
-// var BytesHash = bytesHash
-// var Int32Hash = int32Hash
-// var Int64Hash = int64Hash
+func Read(fd int32, buf unsafe.Pointer, size int32) int32 {
+ return read(fd, buf, size)
+}
-var hashLoad float64 // declared in hashmap.c
-var HashLoad = &hashLoad
+//extern write
+func write(fd int32, buf unsafe.Pointer, size int32) int32
-func memclrBytes(b []byte)
+func Write(fd uintptr, buf unsafe.Pointer, size int32) int32 {
+ return write(int32(fd), buf, size)
+}
-var MemclrBytes = memclrBytes
+func envs() []string
+func setenvs([]string)
+
+var Envs = envs
+var SetEnvs = setenvs
+
+//var BigEndian = _BigEndian
+
+// For benchmarking.
+
+/*
+func BenchSetType(n int, x interface{}) {
+ e := *(*eface)(unsafe.Pointer(&x))
+ t := e._type
+ var size uintptr
+ var p unsafe.Pointer
+ switch t.kind & kindMask {
+ case _KindPtr:
+ t = (*ptrtype)(unsafe.Pointer(t)).elem
+ size = t.size
+ p = e.data
+ case _KindSlice:
+ slice := *(*struct {
+ ptr unsafe.Pointer
+ len, cap uintptr
+ })(e.data)
+ t = (*slicetype)(unsafe.Pointer(t)).elem
+ size = t.size * slice.len
+ p = slice.ptr
+ }
+ allocSize := roundupsize(size)
+ systemstack(func() {
+ for i := 0; i < n; i++ {
+ heapBitsSetType(uintptr(p), allocSize, size, t)
+ }
+ })
+}
-// func gogoBytes() int32
+const PtrSize = ptrSize
-// var GogoBytes = gogoBytes
+var TestingAssertE2I2GC = &testingAssertE2I2GC
+var TestingAssertE2T2GC = &testingAssertE2T2GC
+*/
diff --git a/libgo/go/runtime/arch_amd64.go b/libgo/go/runtime/export_windows_test.go
index 270cd7b..61fcef9 100644
--- a/libgo/go/runtime/arch_amd64.go
+++ b/libgo/go/runtime/export_windows_test.go
@@ -1,8 +1,9 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Export guts for testing.
+
package runtime
-type uintreg uint64
-type intptr int64 // TODO(rsc): remove
+var TestingWER = &testingWER
diff --git a/libgo/go/runtime/extern.go b/libgo/go/runtime/extern.go
index 3c3e427..1f6b13e 100644
--- a/libgo/go/runtime/extern.go
+++ b/libgo/go/runtime/extern.go
@@ -19,10 +19,10 @@ A collection is triggered when the ratio of freshly allocated data to live data
remaining after the previous collection reaches this percentage. The default
is GOGC=100. Setting GOGC=off disables the garbage collector entirely.
The runtime/debug package's SetGCPercent function allows changing this
-percentage at run time. See http://golang.org/pkg/runtime/debug/#SetGCPercent.
+percentage at run time. See https://golang.org/pkg/runtime/debug/#SetGCPercent.
-The GODEBUG variable controls debug output from the runtime. GODEBUG value is
-a comma-separated list of name=val pairs. Supported names are:
+The GODEBUG variable controls debugging variables within the runtime.
+It is a comma-separated list of name=val pairs setting these named variables:
allocfreetrace: setting allocfreetrace=1 causes every allocation to be
profiled and a stack trace printed on each object's allocation and free.
@@ -31,18 +31,61 @@ a comma-separated list of name=val pairs. Supported names are:
where each object is allocated on a unique page and addresses are
never recycled.
+ gccheckmark: setting gccheckmark=1 enables verification of the
+ garbage collector's concurrent mark phase by performing a
+ second mark pass while the world is stopped. If the second
+ pass finds a reachable object that was not found by concurrent
+ mark, the garbage collector will panic.
+
+ gcpacertrace: setting gcpacertrace=1 causes the garbage collector to
+ print information about the internal state of the concurrent pacer.
+
+ gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines
+ onto smaller stacks. In this mode, a goroutine's stack can only grow.
+
+ gcstackbarrieroff: setting gcstackbarrieroff=1 disables the use of stack barriers
+ that allow the garbage collector to avoid repeating a stack scan during the
+ mark termination phase.
+
+ gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection,
+ making every garbage collection a stop-the-world event. Setting gcstoptheworld=2
+ also disables concurrent sweeping after the garbage collection finishes.
+
gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard
error at each collection, summarizing the amount of memory collected and the
length of the pause. Setting gctrace=2 emits the same summary but also
- repeats each collection.
-
- gcdead: setting gcdead=1 causes the garbage collector to clobber all stack slots
- that it thinks are dead.
+ repeats each collection. The format of this line is subject to change.
+ Currently, it is:
+ gc # @#s #%: #+...+# ms clock, #+...+# ms cpu, #->#-># MB, # MB goal, # P
+ where the fields are as follows:
+ gc # the GC number, incremented at each GC
+ @#s time in seconds since program start
+ #% percentage of time spent in GC since program start
+ #+...+# wall-clock/CPU times for the phases of the GC
+ #->#-># MB heap size at GC start, at GC end, and live heap
+ # MB goal goal heap size
+ # P number of processors used
+ The phases are stop-the-world (STW) sweep termination, scan,
+ synchronize Ps, mark, and STW mark termination. The CPU times
+	for mark are broken down into assist time (GC performed in
+ line with allocation), background GC time, and idle GC time.
+ If the line ends with "(forced)", this GC was forced by a
+ runtime.GC() call and all phases are STW.
+
+ memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate.
+ When set to 0 memory profiling is disabled. Refer to the description of
+ MemProfileRate for the default value.
memprofilerate: setting memprofilerate=X changes the setting for
runtime.MemProfileRate. Refer to the description of this variable for how
it is used and its default value.
+ sbrk: setting sbrk=1 replaces the memory allocator and garbage collector
+ with a trivial allocator that obtains memory from the operating system and
+ never reclaims any memory.
+
+	scavenge: setting scavenge=1 enables the debugging mode of the heap scavenger.
+
scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
detailed multiline info every X milliseconds, describing state of the scheduler,
processors, threads and goroutines.
@@ -70,7 +113,7 @@ core dump.
The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete
the set of Go environment variables. They influence the building of Go programs
-(see http://golang.org/cmd/go and http://golang.org/pkg/go/build).
+(see https://golang.org/cmd/go and https://golang.org/pkg/go/build).
GOARCH, GOOS, and GOROOT are recorded at compile time and made available by
constants or functions in this package, but they do not influence the execution
of the run-time system.
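GODEBUG is read by the runtime at startup, so the usual way to exercise one of the variables above is to set it in a child process's environment and capture that process's output. A small sketch (the program path is illustrative):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Run a Go program with GC tracing enabled; the gctrace lines
	// described above appear on the child's standard error.
	cmd := exec.Command("./someprog") // illustrative binary
	cmd.Env = append(os.Environ(), "GODEBUG=gctrace=1")
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Fprintf(os.Stderr, "run failed: %v\n", err)
	}
	fmt.Printf("%s", out)
}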
diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go
index fe9e839..262f87d 100644
--- a/libgo/go/runtime/gc_test.go
+++ b/libgo/go/runtime/gc_test.go
@@ -5,7 +5,8 @@
package runtime_test
import (
- // "os"
+ "os"
+ "reflect"
"runtime"
"runtime/debug"
"testing"
@@ -14,7 +15,6 @@ import (
)
func TestGcSys(t *testing.T) {
- /* gccgo does not have a go command
if os.Getenv("GOGC") == "off" {
t.Skip("skipping test; GOGC=off in environment")
}
@@ -24,7 +24,6 @@ func TestGcSys(t *testing.T) {
if got != want {
t.Fatalf("expected %q, but got %q", want, got)
}
- */
}
const testGCSysSource = `
@@ -199,45 +198,166 @@ func TestHugeGCInfo(t *testing.T) {
}
}
-func BenchmarkSetTypeNoPtr1(b *testing.B) {
- type NoPtr1 struct {
- p uintptr
- }
- var p *NoPtr1
- for i := 0; i < b.N; i++ {
- p = &NoPtr1{}
- }
- _ = p
+func BenchmarkSetTypePtr(b *testing.B) {
+ benchSetType(b, new(*byte))
}
-func BenchmarkSetTypeNoPtr2(b *testing.B) {
- type NoPtr2 struct {
- p, q uintptr
- }
- var p *NoPtr2
- for i := 0; i < b.N; i++ {
- p = &NoPtr2{}
- }
- _ = p
+
+func BenchmarkSetTypePtr8(b *testing.B) {
+ benchSetType(b, new([8]*byte))
}
-func BenchmarkSetTypePtr1(b *testing.B) {
- type Ptr1 struct {
- p *byte
- }
- var p *Ptr1
- for i := 0; i < b.N; i++ {
- p = &Ptr1{}
- }
- _ = p
+
+func BenchmarkSetTypePtr16(b *testing.B) {
+ benchSetType(b, new([16]*byte))
}
-func BenchmarkSetTypePtr2(b *testing.B) {
- type Ptr2 struct {
- p, q *byte
- }
- var p *Ptr2
- for i := 0; i < b.N; i++ {
- p = &Ptr2{}
- }
- _ = p
+
+func BenchmarkSetTypePtr32(b *testing.B) {
+ benchSetType(b, new([32]*byte))
+}
+
+func BenchmarkSetTypePtr64(b *testing.B) {
+ benchSetType(b, new([64]*byte))
+}
+
+func BenchmarkSetTypePtr126(b *testing.B) {
+ benchSetType(b, new([126]*byte))
+}
+
+func BenchmarkSetTypePtr128(b *testing.B) {
+ benchSetType(b, new([128]*byte))
+}
+
+func BenchmarkSetTypePtrSlice(b *testing.B) {
+ benchSetType(b, make([]*byte, 1<<10))
+}
+
+type Node1 struct {
+ Value [1]uintptr
+ Left, Right *byte
+}
+
+func BenchmarkSetTypeNode1(b *testing.B) {
+ benchSetType(b, new(Node1))
+}
+
+func BenchmarkSetTypeNode1Slice(b *testing.B) {
+ benchSetType(b, make([]Node1, 32))
+}
+
+type Node8 struct {
+ Value [8]uintptr
+ Left, Right *byte
+}
+
+func BenchmarkSetTypeNode8(b *testing.B) {
+ benchSetType(b, new(Node8))
+}
+
+func BenchmarkSetTypeNode8Slice(b *testing.B) {
+ benchSetType(b, make([]Node8, 32))
+}
+
+type Node64 struct {
+ Value [64]uintptr
+ Left, Right *byte
+}
+
+func BenchmarkSetTypeNode64(b *testing.B) {
+ benchSetType(b, new(Node64))
+}
+
+func BenchmarkSetTypeNode64Slice(b *testing.B) {
+ benchSetType(b, make([]Node64, 32))
+}
+
+type Node64Dead struct {
+ Left, Right *byte
+ Value [64]uintptr
+}
+
+func BenchmarkSetTypeNode64Dead(b *testing.B) {
+ benchSetType(b, new(Node64Dead))
+}
+
+func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
+ benchSetType(b, make([]Node64Dead, 32))
+}
+
+type Node124 struct {
+ Value [124]uintptr
+ Left, Right *byte
+}
+
+func BenchmarkSetTypeNode124(b *testing.B) {
+ benchSetType(b, new(Node124))
+}
+
+func BenchmarkSetTypeNode124Slice(b *testing.B) {
+ benchSetType(b, make([]Node124, 32))
+}
+
+type Node126 struct {
+ Value [126]uintptr
+ Left, Right *byte
+}
+
+func BenchmarkSetTypeNode126(b *testing.B) {
+ benchSetType(b, new(Node126))
+}
+
+func BenchmarkSetTypeNode126Slice(b *testing.B) {
+ benchSetType(b, make([]Node126, 32))
+}
+
+type Node128 struct {
+ Value [128]uintptr
+ Left, Right *byte
+}
+
+func BenchmarkSetTypeNode128(b *testing.B) {
+ benchSetType(b, new(Node128))
+}
+
+func BenchmarkSetTypeNode128Slice(b *testing.B) {
+ benchSetType(b, make([]Node128, 32))
+}
+
+type Node130 struct {
+ Value [130]uintptr
+ Left, Right *byte
+}
+
+func BenchmarkSetTypeNode130(b *testing.B) {
+ benchSetType(b, new(Node130))
+}
+
+func BenchmarkSetTypeNode130Slice(b *testing.B) {
+ benchSetType(b, make([]Node130, 32))
+}
+
+type Node1024 struct {
+ Value [1024]uintptr
+ Left, Right *byte
+}
+
+func BenchmarkSetTypeNode1024(b *testing.B) {
+ benchSetType(b, new(Node1024))
+}
+
+func BenchmarkSetTypeNode1024Slice(b *testing.B) {
+ benchSetType(b, make([]Node1024, 32))
+}
+
+func benchSetType(b *testing.B, x interface{}) {
+ v := reflect.ValueOf(x)
+ t := v.Type()
+ switch t.Kind() {
+ case reflect.Ptr:
+ b.SetBytes(int64(t.Elem().Size()))
+ case reflect.Slice:
+ b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
+ }
+ b.ResetTimer()
+ //runtime.BenchSetType(b.N, x)
}
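Because benchSetType sizes the benchmark via reflect, adding another shape only requires declaring the type. A hypothetical extra case (not part of this patch) would follow the same pattern as the Node benchmarks above:

type Node256 struct {
	Value       [256]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode256(b *testing.B) {
	benchSetType(b, new(Node256))
}

func BenchmarkSetTypeNode256Slice(b *testing.B) {
	benchSetType(b, make([]Node256, 32))
}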
func BenchmarkAllocation(b *testing.B) {
@@ -292,3 +412,63 @@ func TestPrintGC(t *testing.T) {
}
close(done)
}
+
+/*
+
+// The implicit y, ok := x.(error) for the case error
+// in testTypeSwitch used to not initialize the result y
+// before passing &y to assertE2I2GC.
+// Catch this by making assertE2I2 call runtime.GC,
+// which will force a stack scan and failure if there are
+// bad pointers, and then fill the stack with bad pointers
+// and run the type switch.
+func TestAssertE2I2Liveness(t *testing.T) {
+ // Note that this flag is defined in export_test.go
+ // and is not available to ordinary imports of runtime.
+ *runtime.TestingAssertE2I2GC = true
+ defer func() {
+ *runtime.TestingAssertE2I2GC = false
+ }()
+
+ poisonStack()
+ testTypeSwitch(io.EOF)
+ poisonStack()
+ testAssert(io.EOF)
+ poisonStack()
+ testAssertVar(io.EOF)
+}
+
+func poisonStack() uintptr {
+ var x [1000]uintptr
+ for i := range x {
+ x[i] = 0xff
+ }
+ return x[123]
+}
+
+func testTypeSwitch(x interface{}) error {
+ switch y := x.(type) {
+ case nil:
+ // ok
+ case error:
+ return y
+ }
+ return nil
+}
+
+func testAssert(x interface{}) error {
+ if y, ok := x.(error); ok {
+ return y
+ }
+ return nil
+}
+
+func testAssertVar(x interface{}) error {
+ var y, ok = x.(error)
+ if ok {
+ return y
+ }
+ return nil
+}
+
+*/
diff --git a/libgo/go/runtime/gcinfo_test.go b/libgo/go/runtime/gcinfo_test.go
index 0044992..7e345e5 100644
--- a/libgo/go/runtime/gcinfo_test.go
+++ b/libgo/go/runtime/gcinfo_test.go
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build ignore
+
package runtime_test
import (
@@ -10,25 +12,16 @@ import (
"testing"
)
+const (
+ typeScalar = 0
+ typePointer = 1
+)
+
// TestGCInfo tests that various objects in heap, data and bss receive correct GC pointer type info.
func TestGCInfo(t *testing.T) {
t.Skip("skipping on gccgo for now")
- verifyGCInfo(t, "bss ScalarPtr", &bssScalarPtr, nonStackInfo(infoScalarPtr))
- verifyGCInfo(t, "bss PtrScalar", &bssPtrScalar, nonStackInfo(infoPtrScalar))
- verifyGCInfo(t, "bss BigStruct", &bssBigStruct, nonStackInfo(infoBigStruct()))
- verifyGCInfo(t, "bss string", &bssString, nonStackInfo(infoString))
- verifyGCInfo(t, "bss slice", &bssSlice, nonStackInfo(infoSlice))
- verifyGCInfo(t, "bss eface", &bssEface, nonStackInfo(infoEface))
- verifyGCInfo(t, "bss iface", &bssIface, nonStackInfo(infoIface))
-
- verifyGCInfo(t, "data ScalarPtr", &dataScalarPtr, nonStackInfo(infoScalarPtr))
- verifyGCInfo(t, "data PtrScalar", &dataPtrScalar, nonStackInfo(infoPtrScalar))
- verifyGCInfo(t, "data BigStruct", &dataBigStruct, nonStackInfo(infoBigStruct()))
- verifyGCInfo(t, "data string", &dataString, nonStackInfo(infoString))
- verifyGCInfo(t, "data slice", &dataSlice, nonStackInfo(infoSlice))
- verifyGCInfo(t, "data eface", &dataEface, nonStackInfo(infoEface))
- verifyGCInfo(t, "data iface", &dataIface, nonStackInfo(infoIface))
+ verifyGCInfo(t, "stack Ptr", new(Ptr), infoPtr)
verifyGCInfo(t, "stack ScalarPtr", new(ScalarPtr), infoScalarPtr)
verifyGCInfo(t, "stack PtrScalar", new(PtrScalar), infoPtrScalar)
verifyGCInfo(t, "stack BigStruct", new(BigStruct), infoBigStruct())
@@ -38,40 +31,43 @@ func TestGCInfo(t *testing.T) {
verifyGCInfo(t, "stack iface", new(Iface), infoIface)
for i := 0; i < 10; i++ {
- verifyGCInfo(t, "heap ScalarPtr", escape(new(ScalarPtr)), nonStackInfo(infoScalarPtr))
- verifyGCInfo(t, "heap PtrScalar", escape(new(PtrScalar)), nonStackInfo(infoPtrScalar))
- verifyGCInfo(t, "heap BigStruct", escape(new(BigStruct)), nonStackInfo(infoBigStruct()))
- verifyGCInfo(t, "heap string", escape(new(string)), nonStackInfo(infoString))
- verifyGCInfo(t, "heap eface", escape(new(interface{})), nonStackInfo(infoEface))
- verifyGCInfo(t, "heap iface", escape(new(Iface)), nonStackInfo(infoIface))
+ verifyGCInfo(t, "heap Ptr", escape(new(Ptr)), trimDead(padDead(infoPtr)))
+ verifyGCInfo(t, "heap PtrSlice", escape(&make([]*byte, 10)[0]), trimDead(infoPtr10))
+ verifyGCInfo(t, "heap ScalarPtr", escape(new(ScalarPtr)), trimDead(infoScalarPtr))
+ verifyGCInfo(t, "heap ScalarPtrSlice", escape(&make([]ScalarPtr, 4)[0]), trimDead(infoScalarPtr4))
+ verifyGCInfo(t, "heap PtrScalar", escape(new(PtrScalar)), trimDead(infoPtrScalar))
+ verifyGCInfo(t, "heap BigStruct", escape(new(BigStruct)), trimDead(infoBigStruct()))
+ verifyGCInfo(t, "heap string", escape(new(string)), trimDead(infoString))
+ verifyGCInfo(t, "heap eface", escape(new(interface{})), trimDead(infoEface))
+ verifyGCInfo(t, "heap iface", escape(new(Iface)), trimDead(infoIface))
}
-
}
func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) {
- mask := /* runtime.GCMask(p) */ []byte(nil)
- if len(mask) > len(mask0) {
- mask0 = append(mask0, BitsDead)
- mask = mask[:len(mask0)]
- }
+ mask := runtime.GCMask(p)
if bytes.Compare(mask, mask0) != 0 {
t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask)
return
}
}
-func nonStackInfo(mask []byte) []byte {
- // BitsDead is replaced with BitsScalar everywhere except stacks.
- mask1 := make([]byte, len(mask))
- mw := false
- for i, v := range mask {
- if !mw && v == BitsDead {
- v = BitsScalar
- }
- mw = !mw && v == BitsMultiWord
- mask1[i] = v
+func padDead(mask []byte) []byte {
+ // Because the dead bit isn't encoded until the third word,
+ // and because on 32-bit systems a one-word allocation
+ // uses a two-word block, the pointer info for a one-word
+ // object needs to be expanded to include an extra scalar
+ // on 32-bit systems to match the heap bitmap.
+ if runtime.PtrSize == 4 && len(mask) == 1 {
+ return []byte{mask[0], 0}
}
- return mask1
+ return mask
+}
+
+func trimDead(mask []byte) []byte {
+ for len(mask) > 2 && mask[len(mask)-1] == typeScalar {
+ mask = mask[:len(mask)-1]
+ }
+ return mask
}
var gcinfoSink interface{}
@@ -81,19 +77,13 @@ func escape(p interface{}) interface{} {
return p
}
-const (
- BitsDead = iota
- BitsScalar
- BitsPointer
- BitsMultiWord
-)
+var infoPtr = []byte{typePointer}
-const (
- BitsString = iota // unused
- BitsSlice // unused
- BitsIface
- BitsEface
-)
+type Ptr struct {
+ *byte
+}
+
+var infoPtr10 = []byte{typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer}
type ScalarPtr struct {
q int
@@ -104,7 +94,9 @@ type ScalarPtr struct {
y *int
}
-var infoScalarPtr = []byte{BitsScalar, BitsPointer, BitsScalar, BitsPointer, BitsScalar, BitsPointer}
+var infoScalarPtr = []byte{typeScalar, typePointer, typeScalar, typePointer, typeScalar, typePointer}
+
+var infoScalarPtr4 = append(append(append(append([]byte(nil), infoScalarPtr...), infoScalarPtr...), infoScalarPtr...), infoScalarPtr...)
type PtrScalar struct {
q *int
@@ -115,7 +107,7 @@ type PtrScalar struct {
y int
}
-var infoPtrScalar = []byte{BitsPointer, BitsScalar, BitsPointer, BitsScalar, BitsPointer, BitsScalar}
+var infoPtrScalar = []byte{typePointer, typeScalar, typePointer, typeScalar, typePointer, typeScalar}
type BigStruct struct {
q *int
@@ -132,27 +124,27 @@ func infoBigStruct() []byte {
switch runtime.GOARCH {
case "386", "arm":
return []byte{
- BitsPointer, // q *int
- BitsScalar, BitsScalar, BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte
- BitsPointer, BitsDead, BitsDead, // r []byte
- BitsScalar, BitsScalar, BitsScalar, BitsScalar, // t int; y uint16; u uint64
- BitsPointer, BitsDead, // i string
+ typePointer, // q *int
+ typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
+ typePointer, typeScalar, typeScalar, // r []byte
+ typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
+ typePointer, typeScalar, // i string
}
- case "amd64":
+ case "arm64", "amd64", "ppc64", "ppc64le":
return []byte{
- BitsPointer, // q *int
- BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte
- BitsPointer, BitsDead, BitsDead, // r []byte
- BitsScalar, BitsScalar, BitsScalar, // t int; y uint16; u uint64
- BitsPointer, BitsDead, // i string
+ typePointer, // q *int
+ typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
+ typePointer, typeScalar, typeScalar, // r []byte
+ typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
+ typePointer, typeScalar, // i string
}
case "amd64p32":
return []byte{
- BitsPointer, // q *int
- BitsScalar, BitsScalar, BitsScalar, BitsScalar, BitsScalar, // w byte; e [17]byte
- BitsPointer, BitsDead, BitsDead, // r []byte
- BitsScalar, BitsScalar, BitsDead, BitsScalar, BitsScalar, // t int; y uint16; u uint64
- BitsPointer, BitsDead, // i string
+ typePointer, // q *int
+ typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
+ typePointer, typeScalar, typeScalar, // r []byte
+ typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
+ typePointer, typeScalar, // i string
}
default:
panic("unknown arch")
@@ -170,6 +162,7 @@ func (IfaceImpl) f() {
var (
// BSS
+ bssPtr Ptr
bssScalarPtr ScalarPtr
bssPtrScalar PtrScalar
bssBigStruct BigStruct
@@ -179,6 +172,7 @@ var (
bssIface Iface
// DATA
+ dataPtr = Ptr{new(byte)}
dataScalarPtr = ScalarPtr{q: 1}
dataPtrScalar = PtrScalar{w: 1}
dataBigStruct = BigStruct{w: 1}
@@ -187,8 +181,8 @@ var (
dataEface interface{} = 42
dataIface Iface = IfaceImpl(42)
- infoString = []byte{BitsPointer, BitsDead}
- infoSlice = []byte{BitsPointer, BitsDead, BitsDead}
- infoEface = []byte{BitsMultiWord, BitsEface}
- infoIface = []byte{BitsMultiWord, BitsIface}
+ infoString = []byte{typePointer, typeScalar}
+ infoSlice = []byte{typePointer, typeScalar, typeScalar}
+ infoEface = []byte{typePointer, typePointer}
+ infoIface = []byte{typePointer, typePointer}
)
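The typePointer/typeScalar masks above record, word by word, which parts of an object hold pointers. A rough way to derive the same classification for a struct whose fields are each exactly one word wide, using only reflect (an approximation that ignores dead-word trimming and multi-word fields such as strings, slices, and interfaces), is:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// wordMask reports which words of a flat struct hold pointers (true)
// and which hold scalars (false). Illustration only; it panics on
// fields that are not exactly one word wide.
func wordMask(v interface{}) []bool {
	t := reflect.TypeOf(v)
	mask := make([]bool, 0, t.NumField())
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Type.Size() != unsafe.Sizeof(uintptr(0)) {
			panic("field is not exactly one word wide")
		}
		k := f.Type.Kind()
		mask = append(mask, k == reflect.Ptr || k == reflect.UnsafePointer)
	}
	return mask
}

type ScalarPtr struct {
	q int
	w *int
	e int
	r *int
	t int
	y *int
}

func main() {
	// Prints [false true false true false true], matching infoScalarPtr.
	fmt.Println(wordMask(ScalarPtr{}))
}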
diff --git a/libgo/go/runtime/hashmap.go b/libgo/go/runtime/hashmap.go
deleted file mode 100644
index 791af8c..0000000
--- a/libgo/go/runtime/hashmap.go
+++ /dev/null
@@ -1,960 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// This file contains the implementation of Go's map type.
-//
-// A map is just a hash table. The data is arranged
-// into an array of buckets. Each bucket contains up to
-// 8 key/value pairs. The low-order bits of the hash are
-// used to select a bucket. Each bucket contains a few
-// high-order bits of each hash to distinguish the entries
-// within a single bucket.
-//
-// If more than 8 keys hash to a bucket, we chain on
-// extra buckets.
-//
-// When the hashtable grows, we allocate a new array
-// of buckets twice as big. Buckets are incrementally
-// copied from the old bucket array to the new bucket array.
-//
-// Map iterators walk through the array of buckets and
-// return the keys in walk order (bucket #, then overflow
-// chain order, then bucket index). To maintain iteration
-// semantics, we never move keys within their bucket (if
-// we did, keys might be returned 0 or 2 times). When
-// growing the table, iterators remain iterating through the
-// old table and must check the new table if the bucket
-// they are iterating through has been moved ("evacuated")
-// to the new table.
-
-// Picking loadFactor: too large and we have lots of overflow
-// buckets, too small and we waste a lot of space. I wrote
-// a simple program to check some stats for different loads:
-// (64-bit, 8 byte keys and values)
-// loadFactor %overflow bytes/entry hitprobe missprobe
-// 4.00 2.13 20.77 3.00 4.00
-// 4.50 4.05 17.30 3.25 4.50
-// 5.00 6.85 14.77 3.50 5.00
-// 5.50 10.55 12.94 3.75 5.50
-// 6.00 15.27 11.67 4.00 6.00
-// 6.50 20.90 10.79 4.25 6.50
-// 7.00 27.14 10.15 4.50 7.00
-// 7.50 34.03 9.73 4.75 7.50
-// 8.00 41.10 9.40 5.00 8.00
-//
-// %overflow = percentage of buckets which have an overflow bucket
-// bytes/entry = overhead bytes used per key/value pair
-// hitprobe = # of entries to check when looking up a present key
-// missprobe = # of entries to check when looking up an absent key
-//
-// Keep in mind this data is for maximally loaded tables, i.e. just
-// before the table grows. Typical tables will be somewhat less loaded.
-
-import (
- "unsafe"
-)
-
-const (
- // Maximum number of key/value pairs a bucket can hold.
- bucketCntBits = 3
- bucketCnt = 1 << bucketCntBits
-
- // Maximum average load of a bucket that triggers growth.
- loadFactor = 6.5
-
- // Maximum key or value size to keep inline (instead of mallocing per element).
- // Must fit in a uint8.
- // Fast versions cannot handle big values - the cutoff size for
- // fast versions in ../../cmd/gc/walk.c must be at most this value.
- maxKeySize = 128
- maxValueSize = 128
-
- // data offset should be the size of the bmap struct, but needs to be
- // aligned correctly. For amd64p32 this means 64-bit alignment
- // even though pointers are 32 bit.
- dataOffset = unsafe.Offsetof(struct {
- b bmap
- v int64
- }{}.v)
-
- // Possible tophash values. We reserve a few possibilities for special marks.
- // Each bucket (including its overflow buckets, if any) will have either all or none of its
- // entries in the evacuated* states (except during the evacuate() method, which only happens
- // during map writes and thus no one else can observe the map during that time).
- empty = 0 // cell is empty
- evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
- evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
- evacuatedY = 3 // same as above, but evacuated to second half of larger table.
- minTopHash = 4 // minimum tophash for a normal filled cell.
-
- // flags
- iterator = 1 // there may be an iterator using buckets
- oldIterator = 2 // there may be an iterator using oldbuckets
-
- // sentinel bucket ID for iterator checks
- noCheck = 1<<(8*ptrSize) - 1
-
- // trigger a garbage collection at every alloc called from this code
- checkgc = false
-)
-
-// A header for a Go map.
-type hmap struct {
- // Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
- // ../reflect/type.go. Don't change this structure without also changing that code!
- count int // # live cells == size of map. Must be first (used by len() builtin)
- flags uint32
- hash0 uint32 // hash seed
- B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
-
- buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
- oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
- nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
-}
-
-// A bucket for a Go map.
-type bmap struct {
- tophash [bucketCnt]uint8
- // Followed by bucketCnt keys and then bucketCnt values.
- // NOTE: packing all the keys together and then all the values together makes the
- // code a bit more complicated than alternating key/value/key/value/... but it allows
- // us to eliminate padding which would be needed for, e.g., map[int64]int8.
- // Followed by an overflow pointer.
-}
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/gc/reflect.c to indicate
-// the layout of this structure.
-type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c).
- value unsafe.Pointer // Must be in second position (see cmd/gc/range.c).
- t *maptype
- h *hmap
- buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
- bptr *bmap // current bucket
- startBucket uintptr // bucket iteration started at
- offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
- wrapped bool // already wrapped around from end of bucket array to beginning
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
-}
-
-func evacuated(b *bmap) bool {
- h := b.tophash[0]
- return h > empty && h < minTopHash
-}
-
-func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
-}
-func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
-}
-
-func makemap(t *maptype, hint int64) *hmap {
- if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
- gothrow("bad hmap size")
- }
-
- if hint < 0 || int64(int32(hint)) != hint {
- panic("makemap: size out of range")
- // TODO: make hint an int, then none of this nonsense
- }
-
- if !ismapkey(t.key) {
- gothrow("runtime.makemap: unsupported map key type")
- }
-
- // check compiler's and reflect's math
- if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
- t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
- gothrow("key size wrong")
- }
- if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
- t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
- gothrow("value size wrong")
- }
-
- // invariants we depend on. We should probably check these at compile time
- // somewhere, but for now we'll do it here.
- if t.key.align > bucketCnt {
- gothrow("key align too big")
- }
- if t.elem.align > bucketCnt {
- gothrow("value align too big")
- }
- if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
- gothrow("key size not a multiple of key align")
- }
- if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
- gothrow("value size not a multiple of value align")
- }
- if bucketCnt < 8 {
- gothrow("bucketsize too small for proper alignment")
- }
- if dataOffset%uintptr(t.key.align) != 0 {
- gothrow("need padding in bucket (key)")
- }
- if dataOffset%uintptr(t.elem.align) != 0 {
- gothrow("need padding in bucket (value)")
- }
-
- // find size parameter which will hold the requested # of elements
- B := uint8(0)
- for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
- }
-
- // allocate initial hash table
- // if B == 0, the buckets field is allocated lazily later (in mapassign)
- // If hint is large zeroing this memory could take a while.
- var buckets unsafe.Pointer
- if B != 0 {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- buckets = newarray(t.bucket, uintptr(1)<<B)
- }
-
- // initialize Hmap
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- h := (*hmap)(newobject(t.hmap))
- h.count = 0
- h.B = B
- h.flags = 0
- h.hash0 = fastrand1()
- h.buckets = buckets
- h.oldbuckets = nil
- h.nevacuate = 0
-
- return h
-}
-
-// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
-// it will return a reference to the zero object for the value type if
-// the key is not in the map.
-// NOTE: The returned pointer may keep the whole map live, so don't
-// hold onto it for very long.
-func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- pc := funcPC(mapaccess1)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k, uintptr(t.key.size)) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return v
- }
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero)
- }
- }
-}
-
-func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- pc := funcPC(mapaccess2)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k, uintptr(t.key.size)) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return v, true
- }
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero), false
- }
- }
-}
-
-// returns both key and value. Used by map iterator
-func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
- if h == nil || h.count == 0 {
- return nil, nil
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k, uintptr(t.key.size)) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return k, v
- }
- }
- b = b.overflow(t)
- if b == nil {
- return nil, nil
- }
- }
-}
-
-func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
- if h == nil {
- panic("assignment to entry in nil map")
- }
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- pc := funcPC(mapassign1)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- raceReadObjectPC(t.elem, val, callerpc, pc)
- }
-
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
-
- if h.buckets == nil {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- h.buckets = newarray(t.bucket, 1)
- }
-
-again:
- bucket := hash & (uintptr(1)<<h.B - 1)
- if h.oldbuckets != nil {
- growWork(t, h, bucket)
- }
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
-
- var inserti *uint8
- var insertk unsafe.Pointer
- var insertv unsafe.Pointer
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == empty && inserti == nil {
- inserti = &b.tophash[i]
- insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if !alg.equal(key, k2, uintptr(t.key.size)) {
- continue
- }
- // already have a mapping for key. Update it.
- memmove(k2, key, uintptr(t.key.size))
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- v2 := v
- if t.indirectvalue {
- v2 = *((*unsafe.Pointer)(v2))
- }
- memmove(v2, val, uintptr(t.elem.size))
- return
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
-
- // did not find mapping for key. Allocate new cell & add entry.
- if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
-
- if inserti == nil {
- // all current buckets are full, allocate a new one.
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- newb := (*bmap)(newobject(t.bucket))
- b.setoverflow(t, newb)
- inserti = &newb.tophash[0]
- insertk = add(unsafe.Pointer(newb), dataOffset)
- insertv = add(insertk, bucketCnt*uintptr(t.keysize))
- }
-
- // store new key/value at insert position
- if t.indirectkey {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- kmem := newobject(t.key)
- *(*unsafe.Pointer)(insertk) = kmem
- insertk = kmem
- }
- if t.indirectvalue {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- vmem := newobject(t.elem)
- *(*unsafe.Pointer)(insertv) = vmem
- insertv = vmem
- }
- memmove(insertk, key, uintptr(t.key.size))
- memmove(insertv, val, uintptr(t.elem.size))
- *inserti = top
- h.count++
-}
-
-func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- pc := funcPC(mapdelete)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if h == nil || h.count == 0 {
- return
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- bucket := hash & (uintptr(1)<<h.B - 1)
- if h.oldbuckets != nil {
- growWork(t, h, bucket)
- }
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if !alg.equal(key, k2, uintptr(t.key.size)) {
- continue
- }
- memclr(k, uintptr(t.keysize))
- v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
- memclr(v, uintptr(t.valuesize))
- b.tophash[i] = empty
- h.count--
- return
- }
- b = b.overflow(t)
- if b == nil {
- return
- }
- }
-}
-
-func mapiterinit(t *maptype, h *hmap, it *hiter) {
- // Clear pointer fields so garbage collector does not complain.
- it.key = nil
- it.value = nil
- it.t = nil
- it.h = nil
- it.buckets = nil
- it.bptr = nil
-
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
- }
-
- if h == nil || h.count == 0 {
- it.key = nil
- it.value = nil
- return
- }
-
- if unsafe.Sizeof(hiter{})/ptrSize != 10 {
- gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
- }
- it.t = t
- it.h = h
-
- // grab snapshot of bucket state
- it.B = h.B
- it.buckets = h.buckets
-
- // decide where to start
- r := uintptr(fastrand1())
- if h.B > 31-bucketCntBits {
- r += uintptr(fastrand1()) << 31
- }
- it.startBucket = r & (uintptr(1)<<h.B - 1)
- it.offset = uint8(r >> h.B & (bucketCnt - 1))
-
- // iterator state
- it.bucket = it.startBucket
- it.wrapped = false
- it.bptr = nil
-
- // Remember we have an iterator.
- // Can run concurrently with another hash_iter_init().
- for {
- old := h.flags
- if old == old|iterator|oldIterator {
- break
- }
- if cas(&h.flags, old, old|iterator|oldIterator) {
- break
- }
- }
-
- mapiternext(it)
-}
-
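
mapiterinit picks a random start bucket and a random in-bucket offset from a single random value, so iteration order is never stable. A standalone sketch of that split follows; bucketCnt = 8 is assumed and math/rand stands in for fastrand1.

package main

import (
	"fmt"
	"math/rand"
)

const (
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits // 8 slots per bucket (assumed)
)

// iterStart splits one random value into a starting bucket index
// (the low B bits) and a starting slot offset (the next 3 bits).
func iterStart(r uintptr, B uint8) (startBucket uintptr, offset uint8) {
	startBucket = r & (uintptr(1)<<B - 1)
	offset = uint8(r >> B & (bucketCnt - 1))
	return
}

func main() {
	r := uintptr(rand.Int63())
	b, off := iterStart(r, 5) // a map with 2^5 = 32 buckets
	fmt.Println(b, off)       // bucket in [0,32), slot offset in [0,8)
}
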
-func mapiternext(it *hiter) {
- h := it.h
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&it))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
- }
- t := it.t
- bucket := it.bucket
- b := it.bptr
- i := it.i
- checkBucket := it.checkBucket
- alg := goalg(t.key.alg)
-
-next:
- if b == nil {
- if bucket == it.startBucket && it.wrapped {
- // end of iteration
- it.key = nil
- it.value = nil
- return
- }
- if h.oldbuckets != nil && it.B == h.B {
- // Iterator was started in the middle of a grow, and the grow isn't done yet.
- // If the bucket we're looking at hasn't been filled in yet (i.e. the old
- // bucket hasn't been evacuated) then we need to iterate through the old
- // bucket and only return the ones that will be migrated to this bucket.
- oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- if !evacuated(b) {
- checkBucket = bucket
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- bucket++
- if bucket == uintptr(1)<<it.B {
- bucket = 0
- it.wrapped = true
- }
- i = 0
- }
- for ; i < bucketCnt; i++ {
- offi := (i + it.offset) & (bucketCnt - 1)
- k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
- if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
- if checkBucket != noCheck {
- // Special case: iterator was started during a grow and the
- // grow is not done yet. We're working on a bucket whose
- // oldbucket has not been evacuated yet. Or at least, it wasn't
- // evacuated when we started the bucket. So we're iterating
- // through the oldbucket, skipping any keys that will go
- // to the other new bucket (each oldbucket expands to two
- // buckets during a grow).
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if alg.equal(k2, k2, uintptr(t.key.size)) {
- // If the item in the oldbucket is not destined for
- // the current new bucket in the iteration, skip it.
- hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
- if hash&(uintptr(1)<<it.B-1) != checkBucket {
- continue
- }
- } else {
- // Hash isn't repeatable if k != k (NaNs). We need a
- // repeatable and randomish choice of which direction
- // to send NaNs during evacuation. We'll use the low
- // bit of tophash to decide which way NaNs go.
- // NOTE: this case is why we need two evacuate tophash
- // values, evacuatedX and evacuatedY, that differ in
- // their low bit.
- if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
- continue
- }
- }
- }
- if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
- // this is the golden data, we can return it.
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- it.key = k
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- it.value = v
- } else {
- // The hash table has grown since the iterator was started.
- // The golden data for this key is now somewhere else.
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if alg.equal(k2, k2, uintptr(t.key.size)) {
- // Check the current hash table for the data.
- // This code handles the case where the key
- // has been deleted, updated, or deleted and reinserted.
- // NOTE: we need to regrab the key as it has potentially been
- // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
- rk, rv := mapaccessK(t, h, k2)
- if rk == nil {
- continue // key has been deleted
- }
- it.key = rk
- it.value = rv
- } else {
- // if key!=key then the entry can't be deleted or
- // updated, so we can just return it. That's lucky for
- // us because when key!=key we can't look it up
- // successfully in the current table.
- it.key = k2
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- it.value = v
- }
- }
- it.bucket = bucket
- it.bptr = b
- it.i = i + 1
- it.checkBucket = checkBucket
- return
- }
- }
- b = b.overflow(t)
- i = 0
- goto next
-}
-
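
The checkBucket and NaN comments in mapiternext rest on one property that is easy to see with an ordinary Go map: a NaN key never compares equal to itself, so it can be inserted repeatedly but never found again, and its hash need not be stable. A tiny demonstration:

package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	m := map[float64]int{}
	m[nan] = 1
	m[nan] = 2 // NaN != NaN, so this is a second, distinct entry
	v, ok := m[nan]
	fmt.Println(len(m), v, ok) // 2 0 false: both entries exist, neither is ever found by lookup
}
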
-func hashGrow(t *maptype, h *hmap) {
- if h.oldbuckets != nil {
- gothrow("evacuation not done in time")
- }
- oldbuckets := h.buckets
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
- flags := h.flags &^ (iterator | oldIterator)
- if h.flags&iterator != 0 {
- flags |= oldIterator
- }
- // commit the grow (atomic wrt gc)
- h.B++
- h.flags = flags
- h.oldbuckets = oldbuckets
- h.buckets = newbuckets
- h.nevacuate = 0
-
- // the actual copying of the hash table data is done incrementally
- // by growWork() and evacuate().
-}
-
-func growWork(t *maptype, h *hmap, bucket uintptr) {
- noldbuckets := uintptr(1) << (h.B - 1)
-
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate(t, h, bucket&(noldbuckets-1))
-
- // evacuate one more oldbucket to make progress on growing
- if h.oldbuckets != nil {
- evacuate(t, h, h.nevacuate)
- }
-}
-
-func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := uintptr(1) << (h.B - 1)
- alg := goalg(t.key.alg)
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
-
- x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- xi := 0
- yi := 0
- xk := add(unsafe.Pointer(x), dataOffset)
- yk := add(unsafe.Pointer(y), dataOffset)
- xv := add(xk, bucketCnt*uintptr(t.keysize))
- yv := add(yk, bucketCnt*uintptr(t.keysize))
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- v := add(k, bucketCnt*uintptr(t.keysize))
- for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
- top := b.tophash[i]
- if top == empty {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- gothrow("bad map state")
- }
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/value to bucket x or bucket y).
- hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
- if h.flags&iterator != 0 {
- if !alg.equal(k2, k2, uintptr(t.key.size)) {
- // If key != key (NaNs), then the hash could be (and probably
- // will be) entirely different from the old hash. Moreover,
- // it isn't reproducible. Reproducibility is required in the
- // presence of iterators, as our evacuation decision must
- // match whatever decision the iterator made.
- // Fortunately, we have the freedom to send these keys either
- // way. Also, tophash is meaningless for these kinds of keys.
- // We let the low bit of tophash drive the evacuation decision.
- // We recompute a new random tophash for the next level so
- // these keys will get evenly distributed across all buckets
- // after multiple grows.
- if (top & 1) != 0 {
- hash |= newbit
- } else {
- hash &^= newbit
- }
- top = uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- }
- }
- if (hash & newbit) == 0 {
- b.tophash[i] = evacuatedX
- if xi == bucketCnt {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- newx := (*bmap)(newobject(t.bucket))
- x.setoverflow(t, newx)
- x = newx
- xi = 0
- xk = add(unsafe.Pointer(x), dataOffset)
- xv = add(xk, bucketCnt*uintptr(t.keysize))
- }
- x.tophash[xi] = top
- if t.indirectkey {
- *(*unsafe.Pointer)(xk) = k2 // copy pointer
- } else {
- memmove(xk, k, uintptr(t.key.size)) // copy value
- }
- if t.indirectvalue {
- *(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
- } else {
- memmove(xv, v, uintptr(t.elem.size))
- }
- xi++
- xk = add(xk, uintptr(t.keysize))
- xv = add(xv, uintptr(t.valuesize))
- } else {
- b.tophash[i] = evacuatedY
- if yi == bucketCnt {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- newy := (*bmap)(newobject(t.bucket))
- y.setoverflow(t, newy)
- y = newy
- yi = 0
- yk = add(unsafe.Pointer(y), dataOffset)
- yv = add(yk, bucketCnt*uintptr(t.keysize))
- }
- y.tophash[yi] = top
- if t.indirectkey {
- *(*unsafe.Pointer)(yk) = k2
- } else {
- memmove(yk, k, uintptr(t.key.size))
- }
- if t.indirectvalue {
- *(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
- } else {
- memmove(yv, v, uintptr(t.elem.size))
- }
- yi++
- yk = add(yk, uintptr(t.keysize))
- yv = add(yv, uintptr(t.valuesize))
- }
- }
- }
- // Unlink the overflow buckets & clear key/value to help GC.
- if h.flags&oldIterator == 0 {
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
- }
- }
-
- // Advance evacuation mark
- if oldbucket == h.nevacuate {
- h.nevacuate = oldbucket + 1
- if oldbucket+1 == newbit { // newbit == # of oldbuckets
- // Growing is all done. Free old main bucket array.
- h.oldbuckets = nil
- }
- }
-}
-
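
evacuate sends each key either to bucket x (the old index) or bucket y (old index + newbit) in the doubled array, depending on the hash bit that has just become significant. A standalone sketch of that decision, assuming oldBucket is already the hash masked to the old table size:

package main

import "fmt"

// evacTarget returns the destination bucket index in the grown table:
// the old index if the newly significant hash bit is clear (bucket "x"),
// or the old index plus the old bucket count if it is set (bucket "y").
func evacTarget(hash, oldBucket, newbit uintptr) uintptr {
	if hash&newbit == 0 {
		return oldBucket
	}
	return oldBucket + newbit
}

func main() {
	const newbit = 1 << 4 // the table grew from 16 to 32 buckets
	fmt.Println(evacTarget(0x16, 0x6, newbit)) // 22: bit 4 is set, the key moves to 6+16
	fmt.Println(evacTarget(0x06, 0x6, newbit)) // 6: bit 4 is clear, the key stays at index 6
}
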
-func ismapkey(t *_type) bool {
- return goalg(t.alg).hash != nil
-}
-
-// Reflect stubs. Called from ../reflect/asm_*.s
-
-func reflect_makemap(t *maptype) *hmap {
- return makemap(t, 0)
-}
-
-func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- val, ok := mapaccess2(t, h, key)
- if !ok {
- // reflect wants nil for a missing element
- val = nil
- }
- return val
-}
-
-func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
- mapassign1(t, h, key, val)
-}
-
-func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- mapdelete(t, h, key)
-}
-
-func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
- it := new(hiter)
- mapiterinit(t, h, it)
- return it
-}
-
-func reflect_mapiternext(it *hiter) {
- mapiternext(it)
-}
-
-func reflect_mapiterkey(it *hiter) unsafe.Pointer {
- return it.key
-}
-
-func reflect_maplen(h *hmap) int {
- if h == nil {
- return 0
- }
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&h))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
- }
- return h.count
-}
-
-func reflect_ismapkey(t *_type) bool {
- return ismapkey(t)
-}
diff --git a/libgo/go/runtime/hashmap_fast.go b/libgo/go/runtime/hashmap_fast.go
deleted file mode 100644
index afa6ecc..0000000
--- a/libgo/go/runtime/hashmap_fast.go
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
- if k != key {
- continue
- }
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x == empty {
- continue
- }
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero)
- }
- }
-}
-
-func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
- if k != key {
- continue
- }
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x == empty {
- continue
- }
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero), false
- }
- }
-}
-
-func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
- if k != key {
- continue
- }
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x == empty {
- continue
- }
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero)
- }
- }
-}
-
-func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
- }
- var b *bmap
- if h.B == 0 {
- // One-bucket table. No need to hash.
- b = (*bmap)(h.buckets)
- } else {
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
- if k != key {
- continue
- }
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x == empty {
- continue
- }
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero), false
- }
- }
-}
-
-func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
- }
- key := (*stringStruct)(unsafe.Pointer(&ky))
- if h.B == 0 {
- // One-bucket table.
- b := (*bmap)(h.buckets)
- if key.len < 32 {
- // short key, doing lots of comparisons is ok
- for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x == empty {
- continue
- }
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
- if k.len != key.len {
- continue
- }
- if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
- }
- }
- return unsafe.Pointer(t.elem.zero)
- }
- // long key, try not to do more comparisons than necessary
- keymaybe := uintptr(bucketCnt)
- for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x == empty {
- continue
- }
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
- if k.len != key.len {
- continue
- }
- if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
- }
- // check first 4 bytes
- // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
- // four 1-byte comparisons.
- if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
- continue
- }
- // check last 4 bytes
- if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
- continue
- }
- if keymaybe != bucketCnt {
- // Two keys are potential matches. Use hash to distinguish them.
- goto dohash
- }
- keymaybe = i
- }
- if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
- if memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
- }
- }
- return unsafe.Pointer(t.elem.zero)
- }
-dohash:
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x != top {
- continue
- }
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
- if k.len != key.len {
- continue
- }
- if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
- }
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero)
- }
- }
-}
-
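
For long string keys, mapaccess1_faststr avoids full comparisons by checking the length and the first and last four bytes, keeping at most one surviving candidate for a full memeq. A sketch of that pre-filter on plain strings; quickReject is a made-up helper name, not a runtime function.

package main

import "fmt"

// quickReject reports whether two strings can be ruled out as equal using
// only their lengths and their first and last four bytes.
func quickReject(a, b string) bool {
	if len(a) != len(b) {
		return true
	}
	if len(a) >= 4 && (a[:4] != b[:4] || a[len(a)-4:] != b[len(b)-4:]) {
		return true
	}
	return false // still a candidate; a full comparison is required to decide
}

func main() {
	fmt.Println(quickReject("alphabet", "alphabets"))              // true: lengths differ
	fmt.Println(quickReject("hashmap_fast.go", "hashmap_slow.go")) // true: last four bytes differ
	fmt.Println(quickReject("interpolation", "interposition"))     // false: prefix and suffix match, a full compare is still needed
}
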
-func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
- }
- key := (*stringStruct)(unsafe.Pointer(&ky))
- if h.B == 0 {
- // One-bucket table.
- b := (*bmap)(h.buckets)
- if key.len < 32 {
- // short key, doing lots of comparisons is ok
- for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x == empty {
- continue
- }
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
- if k.len != key.len {
- continue
- }
- if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
- }
- }
- return unsafe.Pointer(t.elem.zero), false
- }
- // long key, try not to do more comparisons than necessary
- keymaybe := uintptr(bucketCnt)
- for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x == empty {
- continue
- }
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
- if k.len != key.len {
- continue
- }
- if k.str == key.str {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
- }
- // check first 4 bytes
- if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
- continue
- }
- // check last 4 bytes
- if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
- continue
- }
- if keymaybe != bucketCnt {
- // Two keys are potential matches. Use hash to distinguish them.
- goto dohash
- }
- keymaybe = i
- }
- if keymaybe != bucketCnt {
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
- if memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
- }
- }
- return unsafe.Pointer(t.elem.zero), false
- }
-dohash:
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
- if x != top {
- continue
- }
- k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
- if k.len != key.len {
- continue
- }
- if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
- return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
- }
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero), false
- }
- }
-}
diff --git a/libgo/go/runtime/iface_test.go b/libgo/go/runtime/iface_test.go
index bca0ea0..7f27baa 100644
--- a/libgo/go/runtime/iface_test.go
+++ b/libgo/go/runtime/iface_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ "runtime"
"testing"
)
@@ -36,8 +37,50 @@ var (
ts TS
tm TM
tl TL
+ ok bool
)
+// Issue 9370
+func TestCmpIfaceConcreteAlloc(t *testing.T) {
+ if runtime.Compiler != "gc" {
+ t.Skip("skipping on non-gc compiler")
+ }
+
+ n := testing.AllocsPerRun(1, func() {
+ _ = e == ts
+ _ = i1 == ts
+ _ = e == 1
+ })
+
+ if n > 0 {
+ t.Fatalf("iface cmp allocs=%v; want 0", n)
+ }
+}
+
+func BenchmarkEqEfaceConcrete(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _ = e == ts
+ }
+}
+
+func BenchmarkEqIfaceConcrete(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _ = i1 == ts
+ }
+}
+
+func BenchmarkNeEfaceConcrete(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _ = e != ts
+ }
+}
+
+func BenchmarkNeIfaceConcrete(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ _ = i1 != ts
+ }
+}
+
func BenchmarkConvT2ESmall(b *testing.B) {
for i := 0; i < b.N; i++ {
e = ts
@@ -136,3 +179,85 @@ func BenchmarkAssertE2E(b *testing.B) {
e_ = e
}
}
+
+func BenchmarkAssertE2T2(b *testing.B) {
+ e = tm
+ for i := 0; i < b.N; i++ {
+ tm, ok = e.(TM)
+ }
+}
+
+func BenchmarkAssertE2T2Blank(b *testing.B) {
+ e = tm
+ for i := 0; i < b.N; i++ {
+ _, ok = e.(TM)
+ }
+}
+
+func BenchmarkAssertI2E2(b *testing.B) {
+ i1 = tm
+ for i := 0; i < b.N; i++ {
+ e, ok = i1.(interface{})
+ }
+}
+
+func BenchmarkAssertI2E2Blank(b *testing.B) {
+ i1 = tm
+ for i := 0; i < b.N; i++ {
+ _, ok = i1.(interface{})
+ }
+}
+
+func BenchmarkAssertE2E2(b *testing.B) {
+ e = tm
+ for i := 0; i < b.N; i++ {
+ e_, ok = e.(interface{})
+ }
+}
+
+func BenchmarkAssertE2E2Blank(b *testing.B) {
+ e = tm
+ for i := 0; i < b.N; i++ {
+ _, ok = e.(interface{})
+ }
+}
+
+func TestNonEscapingConvT2E(t *testing.T) {
+ m := make(map[interface{}]bool)
+ m[42] = true
+ if !m[42] {
+ t.Fatalf("42 is not present in the map")
+ }
+ if m[0] {
+ t.Fatalf("0 is present in the map")
+ }
+
+ n := testing.AllocsPerRun(1000, func() {
+ if m[0] {
+ t.Fatalf("0 is present in the map")
+ }
+ })
+ if n != 0 {
+ t.Fatalf("want 0 allocs, got %v", n)
+ }
+}
+
+func TestNonEscapingConvT2I(t *testing.T) {
+ m := make(map[I1]bool)
+ m[TM(42)] = true
+ if !m[TM(42)] {
+ t.Fatalf("42 is not present in the map")
+ }
+ if m[TM(0)] {
+ t.Fatalf("0 is present in the map")
+ }
+
+ n := testing.AllocsPerRun(1000, func() {
+ if m[TM(0)] {
+ t.Fatalf("0 is present in the map")
+ }
+ })
+ if n != 0 {
+ t.Fatalf("want 0 allocs, got %v", n)
+ }
+}
diff --git a/libgo/go/runtime/lfstack_test.go b/libgo/go/runtime/lfstack_test.go
index e518777..fb4b459 100644
--- a/libgo/go/runtime/lfstack_test.go
+++ b/libgo/go/runtime/lfstack_test.go
@@ -24,9 +24,13 @@ func toMyNode(node *LFNode) *MyNode {
return (*MyNode)(unsafe.Pointer(node))
}
+var global interface{}
+
func TestLFStack(t *testing.T) {
stack := new(uint64)
- // Need to keep additional referenfces to nodes, the stack is not all that type-safe.
+ global = stack // force heap allocation
+
+ // Need to keep additional references to nodes, the stack is not all that type-safe.
var nodes []*MyNode
// Check the stack is initially empty.
@@ -121,7 +125,7 @@ func TestLFStackStress(t *testing.T) {
}
cnt++
sum2 += node.data
- node.Next = nil
+ node.Next = 0
}
}
if cnt != K {
diff --git a/libgo/go/runtime/lock_futex.go b/libgo/go/runtime/lock_futex.go
deleted file mode 100644
index 7259623..0000000
--- a/libgo/go/runtime/lock_futex.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build dragonfly freebsd linux
-
-package runtime
-
-import "unsafe"
-
-// This implementation depends on OS-specific implementations of
-//
-// runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
-// Atomically,
-// if(*addr == val) sleep
-// Might be woken up spuriously; that's allowed.
-// Don't sleep longer than ns; ns < 0 means forever.
-//
-// runtime·futexwakeup(uint32 *addr, uint32 cnt)
-// If any procs are sleeping on addr, wake up at most cnt.
-
-const (
- mutex_unlocked = 0
- mutex_locked = 1
- mutex_sleeping = 2
-
- active_spin = 4
- active_spin_cnt = 30
- passive_spin = 1
-)
-
-// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
-// mutex_sleeping means that there is presumably at least one sleeping thread.
-// Note that there can be spinning threads during all states - they do not
-// affect mutex's state.
-
-func futexsleep(addr *uint32, val uint32, ns int64)
-func futexwakeup(addr *uint32, cnt uint32)
-
-// We use the uintptr mutex.key and note.key as a uint32.
-func key32(p *uintptr) *uint32 {
- return (*uint32)(unsafe.Pointer(p))
-}
-
-func lock(l *mutex) {
- gp := getg()
-
- if gp.m.locks < 0 {
- gothrow("runtime·lock: lock count")
- }
- gp.m.locks++
-
- // Speculative grab for lock.
- v := xchg(key32(&l.key), mutex_locked)
- if v == mutex_unlocked {
- return
- }
-
- // wait is either MUTEX_LOCKED or MUTEX_SLEEPING
- // depending on whether there is a thread sleeping
- // on this mutex. If we ever change l->key from
- // MUTEX_SLEEPING to some other value, we must be
- // careful to change it back to MUTEX_SLEEPING before
- // returning, to ensure that the sleeping thread gets
- // its wakeup call.
- wait := v
-
- // On uniprocessors, no point spinning.
- // On multiprocessors, spin for ACTIVE_SPIN attempts.
- spin := 0
- if ncpu > 1 {
- spin = active_spin
- }
- for {
- // Try for lock, spinning.
- for i := 0; i < spin; i++ {
- for l.key == mutex_unlocked {
- if cas(key32(&l.key), mutex_unlocked, wait) {
- return
- }
- }
- procyield(active_spin_cnt)
- }
-
- // Try for lock, rescheduling.
- for i := 0; i < passive_spin; i++ {
- for l.key == mutex_unlocked {
- if cas(key32(&l.key), mutex_unlocked, wait) {
- return
- }
- }
- osyield()
- }
-
- // Sleep.
- v = xchg(key32(&l.key), mutex_sleeping)
- if v == mutex_unlocked {
- return
- }
- wait = mutex_sleeping
- futexsleep(key32(&l.key), mutex_sleeping, -1)
- }
-}
-
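
The lock above cycles a key through three states (unlocked, locked, locked with possible sleepers) on top of the OS futex calls. Below is a simplified, portable sketch of the same state machine using sync/atomic, with a buffered channel standing in for futexsleep/futexwakeup; it illustrates the transitions only and omits the runtime's spinning and per-M bookkeeping.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const (
	mutexUnlocked = 0
	mutexLocked   = 1
	mutexSleeping = 2 // locked, and at least one waiter may be sleeping
)

type futexishMutex struct {
	key  uint32
	wake chan struct{} // stand-in for the futex wait queue
}

func newFutexishMutex() *futexishMutex {
	return &futexishMutex{wake: make(chan struct{}, 1)}
}

func (l *futexishMutex) lock() {
	// Speculative grab: if the swap saw "unlocked", we now own the lock.
	if atomic.SwapUint32(&l.key, mutexLocked) == mutexUnlocked {
		return
	}
	for {
		// Advertise a sleeper, then wait for a wakeup and retry.
		if atomic.SwapUint32(&l.key, mutexSleeping) == mutexUnlocked {
			return
		}
		<-l.wake
	}
}

func (l *futexishMutex) unlock() {
	// Release, and deliver a wakeup only if a sleeper was advertised.
	if atomic.SwapUint32(&l.key, mutexUnlocked) == mutexSleeping {
		select {
		case l.wake <- struct{}{}:
		default: // a wakeup is already pending
		}
	}
}

func main() {
	l := newFutexishMutex()
	var wg sync.WaitGroup
	counter := 0
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				l.lock()
				counter++ // protected by the sketch lock
				l.unlock()
			}
		}()
	}
	wg.Wait()
	fmt.Println(counter) // 4000
}
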
-func unlock(l *mutex) {
- v := xchg(key32(&l.key), mutex_unlocked)
- if v == mutex_unlocked {
- gothrow("unlock of unlocked lock")
- }
- if v == mutex_sleeping {
- futexwakeup(key32(&l.key), 1)
- }
-
- gp := getg()
- gp.m.locks--
- if gp.m.locks < 0 {
- gothrow("runtime·unlock: lock count")
- }
- if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
- gp.stackguard0 = stackPreempt
- }
-}
-
-// One-time notifications.
-func noteclear(n *note) {
- n.key = 0
-}
-
-func notewakeup(n *note) {
- old := xchg(key32(&n.key), 1)
- if old != 0 {
- print("notewakeup - double wakeup (", old, ")\n")
- gothrow("notewakeup - double wakeup")
- }
- futexwakeup(key32(&n.key), 1)
-}
-
-func notesleep(n *note) {
- gp := getg()
- if gp != gp.m.g0 {
- gothrow("notesleep not on g0")
- }
- for atomicload(key32(&n.key)) == 0 {
- gp.m.blocked = true
- futexsleep(key32(&n.key), 0, -1)
- gp.m.blocked = false
- }
-}
-
-//go:nosplit
-func notetsleep_internal(n *note, ns int64) bool {
- gp := getg()
-
- if ns < 0 {
- for atomicload(key32(&n.key)) == 0 {
- gp.m.blocked = true
- futexsleep(key32(&n.key), 0, -1)
- gp.m.blocked = false
- }
- return true
- }
-
- if atomicload(key32(&n.key)) != 0 {
- return true
- }
-
- deadline := nanotime() + ns
- for {
- gp.m.blocked = true
- futexsleep(key32(&n.key), 0, ns)
- gp.m.blocked = false
- if atomicload(key32(&n.key)) != 0 {
- break
- }
- now := nanotime()
- if now >= deadline {
- break
- }
- ns = deadline - now
- }
- return atomicload(key32(&n.key)) != 0
-}
-
-func notetsleep(n *note, ns int64) bool {
- gp := getg()
- if gp != gp.m.g0 && gp.m.gcing == 0 {
- gothrow("notetsleep not on g0")
- }
-
- return notetsleep_internal(n, ns)
-}
-
-// same as runtime·notetsleep, but called on user g (not g0)
-// calls only nosplit functions between entersyscallblock/exitsyscall
-func notetsleepg(n *note, ns int64) bool {
- gp := getg()
- if gp == gp.m.g0 {
- gothrow("notetsleepg on g0")
- }
-
- entersyscallblock()
- ok := notetsleep_internal(n, ns)
- exitsyscall()
- return ok
-}
diff --git a/libgo/go/runtime/lock_sema.go b/libgo/go/runtime/lock_sema.go
deleted file mode 100644
index d136b82..0000000
--- a/libgo/go/runtime/lock_sema.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin nacl netbsd openbsd plan9 solaris windows
-
-package runtime
-
-import "unsafe"
-
-// This implementation depends on OS-specific implementations of
-//
-// uintptr runtime·semacreate(void)
-// Create a semaphore, which will be assigned to m->waitsema.
-// The zero value is treated as absence of any semaphore,
-// so be sure to return a non-zero value.
-//
-// int32 runtime·semasleep(int64 ns)
-// If ns < 0, acquire m->waitsema and return 0.
-// If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
-// Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
-//
-// int32 runtime·semawakeup(M *mp)
-// Wake up mp, which is or will soon be sleeping on mp->waitsema.
-//
-const (
- locked uintptr = 1
-
- active_spin = 4
- active_spin_cnt = 30
- passive_spin = 1
-)
-
-func semacreate() uintptr
-func semasleep(int64) int32
-func semawakeup(mp *m)
-
-func lock(l *mutex) {
- gp := getg()
- if gp.m.locks < 0 {
- gothrow("runtime·lock: lock count")
- }
- gp.m.locks++
-
- // Speculative grab for lock.
- if casuintptr(&l.key, 0, locked) {
- return
- }
- if gp.m.waitsema == 0 {
- gp.m.waitsema = semacreate()
- }
-
- // On uniprocessors, no point spinning.
- // On multiprocessors, spin for ACTIVE_SPIN attempts.
- spin := 0
- if ncpu > 1 {
- spin = active_spin
- }
-Loop:
- for i := 0; ; i++ {
- v := atomicloaduintptr(&l.key)
- if v&locked == 0 {
- // Unlocked. Try to lock.
- if casuintptr(&l.key, v, v|locked) {
- return
- }
- i = 0
- }
- if i < spin {
- procyield(active_spin_cnt)
- } else if i < spin+passive_spin {
- osyield()
- } else {
- // Someone else has it.
- // l->waitm points to a linked list of M's waiting
- // for this lock, chained through m->nextwaitm.
- // Queue this M.
- for {
- gp.m.nextwaitm = (*m)((unsafe.Pointer)(v &^ locked))
- if casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
- break
- }
- v = atomicloaduintptr(&l.key)
- if v&locked == 0 {
- continue Loop
- }
- }
- if v&locked != 0 {
- // Queued. Wait.
- semasleep(-1)
- i = 0
- }
- }
- }
-}
-
-func unlock(l *mutex) {
- gp := getg()
- var mp *m
- for {
- v := atomicloaduintptr(&l.key)
- if v == locked {
- if casuintptr(&l.key, locked, 0) {
- break
- }
- } else {
- // Other M's are waiting for the lock.
- // Dequeue an M.
- mp = (*m)((unsafe.Pointer)(v &^ locked))
- if casuintptr(&l.key, v, uintptr(unsafe.Pointer(mp.nextwaitm))) {
- // Dequeued an M. Wake it.
- semawakeup(mp)
- break
- }
- }
- }
- gp.m.locks--
- if gp.m.locks < 0 {
- gothrow("runtime·unlock: lock count")
- }
- if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
- gp.stackguard0 = stackPreempt
- }
-}
-
-// One-time notifications.
-func noteclear(n *note) {
- n.key = 0
-}
-
-func notewakeup(n *note) {
- var v uintptr
- for {
- v = atomicloaduintptr(&n.key)
- if casuintptr(&n.key, v, locked) {
- break
- }
- }
-
- // Successfully set waitm to locked.
- // What was it before?
- switch {
- case v == 0:
- // Nothing was waiting. Done.
- case v == locked:
- // Two notewakeups! Not allowed.
- gothrow("notewakeup - double wakeup")
- default:
- // Must be the waiting m. Wake it up.
- semawakeup((*m)(unsafe.Pointer(v)))
- }
-}
-
-func notesleep(n *note) {
- gp := getg()
- if gp != gp.m.g0 {
- gothrow("notesleep not on g0")
- }
- if gp.m.waitsema == 0 {
- gp.m.waitsema = semacreate()
- }
- if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
- // Must be locked (got wakeup).
- if n.key != locked {
- gothrow("notesleep - waitm out of sync")
- }
- return
- }
- // Queued. Sleep.
- gp.m.blocked = true
- semasleep(-1)
- gp.m.blocked = false
-}
-
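
noteclear, notewakeup and notesleep implement a one-time notification on top of the OS semaphore. The contract is easiest to see through a channel analogy, sketched below; this is an analogy for the semantics only, not how the runtime implements it (the runtime cannot allocate channels here).

package main

import "fmt"

// note is a one-time notification: cleared once, woken at most once,
// and any sleeper returns after the single wakeup has been delivered.
type note struct{ ch chan struct{} }

func noteclear(n *note)  { n.ch = make(chan struct{}) }
func notewakeup(n *note) { close(n.ch) } // a second wakeup would panic, mirroring "double wakeup"
func notesleep(n *note)  { <-n.ch }

func main() {
	var n note
	noteclear(&n)
	go notewakeup(&n)
	notesleep(&n)
	fmt.Println("woken exactly once")
}
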
-//go:nosplit
-func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
- // gp and deadline are logically local variables, but they are written
- // as parameters so that the stack space they require is charged
- // to the caller.
- // This reduces the nosplit footprint of notetsleep_internal.
- gp = getg()
-
- // Register for wakeup on n->waitm.
- if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
- // Must be locked (got wakeup).
- if n.key != locked {
- gothrow("notetsleep - waitm out of sync")
- }
- return true
- }
- if ns < 0 {
- // Queued. Sleep.
- gp.m.blocked = true
- semasleep(-1)
- gp.m.blocked = false
- return true
- }
-
- deadline = nanotime() + ns
- for {
- // Registered. Sleep.
- gp.m.blocked = true
- if semasleep(ns) >= 0 {
- gp.m.blocked = false
- // Acquired semaphore, semawakeup unregistered us.
- // Done.
- return true
- }
- gp.m.blocked = false
- // Interrupted or timed out. Still registered. Semaphore not acquired.
- ns = deadline - nanotime()
- if ns <= 0 {
- break
- }
- // Deadline hasn't arrived. Keep sleeping.
- }
-
- // Deadline arrived. Still registered. Semaphore not acquired.
- // Want to give up and return, but have to unregister first,
- // so that any notewakeup racing with the return does not
- // try to grant us the semaphore when we don't expect it.
- for {
- v := atomicloaduintptr(&n.key)
- switch v {
- case uintptr(unsafe.Pointer(gp.m)):
- // No wakeup yet; unregister if possible.
- if casuintptr(&n.key, v, 0) {
- return false
- }
- case locked:
- // Wakeup happened so semaphore is available.
- // Grab it to avoid getting out of sync.
- gp.m.blocked = true
- if semasleep(-1) < 0 {
- gothrow("runtime: unable to acquire - semaphore out of sync")
- }
- gp.m.blocked = false
- return true
- default:
- gothrow("runtime: unexpected waitm - semaphore out of sync")
- }
- }
-}
-
-func notetsleep(n *note, ns int64) bool {
- gp := getg()
- if gp != gp.m.g0 && gp.m.gcing == 0 {
- gothrow("notetsleep not on g0")
- }
- if gp.m.waitsema == 0 {
- gp.m.waitsema = semacreate()
- }
- return notetsleep_internal(n, ns, nil, 0)
-}
-
-// same as runtime·notetsleep, but called on user g (not g0)
-// calls only nosplit functions between entersyscallblock/exitsyscall
-func notetsleepg(n *note, ns int64) bool {
- gp := getg()
- if gp == gp.m.g0 {
- gothrow("notetsleepg on g0")
- }
- if gp.m.waitsema == 0 {
- gp.m.waitsema = semacreate()
- }
- entersyscallblock()
- ok := notetsleep_internal(n, ns, nil, 0)
- exitsyscall()
- return ok
-}
diff --git a/libgo/go/runtime/malloc.go b/libgo/go/runtime/malloc.go
deleted file mode 100644
index 1170449..0000000
--- a/libgo/go/runtime/malloc.go
+++ /dev/null
@@ -1,837 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-const (
- debugMalloc = false
-
- flagNoScan = _FlagNoScan
- flagNoZero = _FlagNoZero
-
- maxTinySize = _TinySize
- tinySizeClass = _TinySizeClass
- maxSmallSize = _MaxSmallSize
-
- pageShift = _PageShift
- pageSize = _PageSize
- pageMask = _PageMask
-
- bitsPerPointer = _BitsPerPointer
- bitsMask = _BitsMask
- pointersPerByte = _PointersPerByte
- maxGCMask = _MaxGCMask
- bitsDead = _BitsDead
- bitsPointer = _BitsPointer
-
- mSpanInUse = _MSpanInUse
-
- concurrentSweep = _ConcurrentSweep != 0
-)
-
-// Page number (address>>pageShift)
-type pageID uintptr
-
-// base address for all 0-byte allocations
-var zerobase uintptr
-
-// Allocate an object of size bytes.
-// Small objects are allocated from the per-P cache's free lists.
-// Large objects (> 32 kB) are allocated straight from the heap.
-func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
- if size == 0 {
- return unsafe.Pointer(&zerobase)
- }
- size0 := size
-
- if flags&flagNoScan == 0 && typ == nil {
- gothrow("malloc missing type")
- }
-
- // This function must be atomic wrt GC, but for performance reasons
- // we don't acquirem/releasem on fast path. The code below does not have
- // split stack checks, so it can't be preempted by GC.
- // Functions like roundup/add are inlined. And onM/racemalloc are nosplit.
- // If debugMalloc = true, these assumptions are checked below.
- if debugMalloc {
- mp := acquirem()
- if mp.mallocing != 0 {
- gothrow("malloc deadlock")
- }
- mp.mallocing = 1
- if mp.curg != nil {
- mp.curg.stackguard0 = ^uintptr(0xfff) | 0xbad
- }
- }
-
- c := gomcache()
- var s *mspan
- var x unsafe.Pointer
- if size <= maxSmallSize {
- if flags&flagNoScan != 0 && size < maxTinySize {
- // Tiny allocator.
- //
- // Tiny allocator combines several tiny allocation requests
- // into a single memory block. The resulting memory block
- // is freed when all subobjects are unreachable. The subobjects
- // must be FlagNoScan (don't have pointers), this ensures that
- // the amount of potentially wasted memory is bounded.
- //
- // Size of the memory block used for combining (maxTinySize) is tunable.
- // Current setting is 16 bytes, which relates to 2x worst case memory
- // wastage (when all but one of the subobjects are unreachable).
- // 8 bytes would result in no wastage at all, but provides fewer
- // opportunities for combining.
- // 32 bytes provides more opportunities for combining,
- // but can lead to 4x worst case wastage.
- // The best-case win is 8x regardless of block size.
- //
- // Objects obtained from tiny allocator must not be freed explicitly.
- // So when an object will be freed explicitly, we ensure that
- // its size >= maxTinySize.
- //
- // SetFinalizer has a special case for objects potentially coming
- // from tiny allocator; in such a case it allows setting finalizers
- // for an inner byte of a memory block.
- //
- // The main targets of tiny allocator are small strings and
- // standalone escaping variables. On a json benchmark
- // the allocator reduces number of allocations by ~12% and
- // reduces heap size by ~20%.
- tinysize := uintptr(c.tinysize)
- if size <= tinysize {
- tiny := unsafe.Pointer(c.tiny)
- // Align tiny pointer for required (conservative) alignment.
- if size&7 == 0 {
- tiny = roundup(tiny, 8)
- } else if size&3 == 0 {
- tiny = roundup(tiny, 4)
- } else if size&1 == 0 {
- tiny = roundup(tiny, 2)
- }
- size1 := size + (uintptr(tiny) - uintptr(unsafe.Pointer(c.tiny)))
- if size1 <= tinysize {
- // The object fits into existing tiny block.
- x = tiny
- c.tiny = (*byte)(add(x, size))
- c.tinysize -= uintptr(size1)
- c.local_tinyallocs++
- if debugMalloc {
- mp := acquirem()
- if mp.mallocing == 0 {
- gothrow("bad malloc")
- }
- mp.mallocing = 0
- if mp.curg != nil {
- mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
- }
- // Note: one releasem for the acquirem just above.
- // The other for the acquirem at start of malloc.
- releasem(mp)
- releasem(mp)
- }
- return x
- }
- }
- // Allocate a new maxTinySize block.
- s = c.alloc[tinySizeClass]
- v := s.freelist
- if v == nil {
- mp := acquirem()
- mp.scalararg[0] = tinySizeClass
- onM(mcacheRefill_m)
- releasem(mp)
- s = c.alloc[tinySizeClass]
- v = s.freelist
- }
- s.freelist = v.next
- s.ref++
- //TODO: prefetch v.next
- x = unsafe.Pointer(v)
- (*[2]uint64)(x)[0] = 0
- (*[2]uint64)(x)[1] = 0
- // See if we need to replace the existing tiny block with the new one
- // based on amount of remaining free space.
- if maxTinySize-size > tinysize {
- c.tiny = (*byte)(add(x, size))
- c.tinysize = uintptr(maxTinySize - size)
- }
- size = maxTinySize
- } else {
- var sizeclass int8
- if size <= 1024-8 {
- sizeclass = size_to_class8[(size+7)>>3]
- } else {
- sizeclass = size_to_class128[(size-1024+127)>>7]
- }
- size = uintptr(class_to_size[sizeclass])
- s = c.alloc[sizeclass]
- v := s.freelist
- if v == nil {
- mp := acquirem()
- mp.scalararg[0] = uintptr(sizeclass)
- onM(mcacheRefill_m)
- releasem(mp)
- s = c.alloc[sizeclass]
- v = s.freelist
- }
- s.freelist = v.next
- s.ref++
- //TODO: prefetch
- x = unsafe.Pointer(v)
- if flags&flagNoZero == 0 {
- v.next = nil
- if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
- memclr(unsafe.Pointer(v), size)
- }
- }
- }
- c.local_cachealloc += intptr(size)
- } else {
- mp := acquirem()
- mp.scalararg[0] = uintptr(size)
- mp.scalararg[1] = uintptr(flags)
- onM(largeAlloc_m)
- s = (*mspan)(mp.ptrarg[0])
- mp.ptrarg[0] = nil
- releasem(mp)
- x = unsafe.Pointer(uintptr(s.start << pageShift))
- size = uintptr(s.elemsize)
- }
-
- if flags&flagNoScan != 0 {
- // All objects are pre-marked as noscan.
- goto marked
- }
-
- // If allocating a defer+arg block, now that we've picked a malloc size
- // large enough to hold everything, cut the "asked for" size down to
- // just the defer header, so that the GC bitmap will record the arg block
- // as containing nothing at all (as if it were unused space at the end of
- // a malloc block caused by size rounding).
- // The defer arg areas are scanned as part of scanstack.
- if typ == deferType {
- size0 = unsafe.Sizeof(_defer{})
- }
-
- // From here till marked label marking the object as allocated
- // and storing type info in the GC bitmap.
- {
- arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
- off := (uintptr(x) - arena_start) / ptrSize
- xbits := (*uint8)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
- shift := (off % wordsPerBitmapByte) * gcBits
- if debugMalloc && ((*xbits>>shift)&(bitMask|bitPtrMask)) != bitBoundary {
- println("runtime: bits =", (*xbits>>shift)&(bitMask|bitPtrMask))
- gothrow("bad bits in markallocated")
- }
-
- var ti, te uintptr
- var ptrmask *uint8
- if size == ptrSize {
- // It's one word and it has pointers, it must be a pointer.
- *xbits |= (bitsPointer << 2) << shift
- goto marked
- }
- if typ.kind&kindGCProg != 0 {
- nptr := (uintptr(typ.size) + ptrSize - 1) / ptrSize
- masksize := nptr
- if masksize%2 != 0 {
- masksize *= 2 // repeated
- }
- masksize = masksize * pointersPerByte / 8 // 4 bits per word
- masksize++ // unroll flag in the beginning
- if masksize > maxGCMask && typ.gc[1] != 0 {
- // If the mask is too large, unroll the program directly
- // into the GC bitmap. It's 7 times slower than copying
- // from the pre-unrolled mask, but saves 1/16 of type size
- // memory for the mask.
- mp := acquirem()
- mp.ptrarg[0] = x
- mp.ptrarg[1] = unsafe.Pointer(typ)
- mp.scalararg[0] = uintptr(size)
- mp.scalararg[1] = uintptr(size0)
- onM(unrollgcproginplace_m)
- releasem(mp)
- goto marked
- }
- ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
- // Check whether the program is already unrolled.
- if uintptr(atomicloadp(unsafe.Pointer(ptrmask)))&0xff == 0 {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(typ)
- onM(unrollgcprog_m)
- releasem(mp)
- }
- ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
- } else {
- ptrmask = (*uint8)(unsafe.Pointer(typ.gc[0])) // pointer to unrolled mask
- }
- if size == 2*ptrSize {
- *xbits = *ptrmask | bitBoundary
- goto marked
- }
- te = uintptr(typ.size) / ptrSize
- // If the type occupies odd number of words, its mask is repeated.
- if te%2 == 0 {
- te /= 2
- }
- // Copy pointer bitmask into the bitmap.
- for i := uintptr(0); i < size0; i += 2 * ptrSize {
- v := *(*uint8)(add(unsafe.Pointer(ptrmask), ti))
- ti++
- if ti == te {
- ti = 0
- }
- if i == 0 {
- v |= bitBoundary
- }
- if i+ptrSize == size0 {
- v &^= uint8(bitPtrMask << 4)
- }
-
- *xbits = v
- xbits = (*byte)(add(unsafe.Pointer(xbits), ^uintptr(0)))
- }
- if size0%(2*ptrSize) == 0 && size0 < size {
- // Mark the word after last object's word as bitsDead.
- *xbits = bitsDead << 2
- }
- }
-marked:
- if raceenabled {
- racemalloc(x, size)
- }
-
- if debugMalloc {
- mp := acquirem()
- if mp.mallocing == 0 {
- gothrow("bad malloc")
- }
- mp.mallocing = 0
- if mp.curg != nil {
- mp.curg.stackguard0 = mp.curg.stack.lo + _StackGuard
- }
- // Note: one releasem for the acquirem just above.
- // The other for the acquirem at start of malloc.
- releasem(mp)
- releasem(mp)
- }
-
- if debug.allocfreetrace != 0 {
- tracealloc(x, size, typ)
- }
-
- if rate := MemProfileRate; rate > 0 {
- if size < uintptr(rate) && int32(size) < c.next_sample {
- c.next_sample -= int32(size)
- } else {
- mp := acquirem()
- profilealloc(mp, x, size)
- releasem(mp)
- }
- }
-
- if memstats.heap_alloc >= memstats.next_gc {
- gogc(0)
- }
-
- return x
-}
-
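
The long comment inside mallocgc describes the tiny allocator: several small, pointer-free objects are packed into a single 16-byte block with an aligned bump pointer. A standalone sketch of just that packing arithmetic, using offsets in place of real addresses:

package main

import "fmt"

const maxTinySize = 16 // size of one combining block (matches maxTinySize above)

type tinyBlock struct {
	off int // next free offset inside the current 16-byte block
}

// alloc returns the offset of a size-byte object inside the block,
// aligned conservatively by the size's low bits, or -1 if the object
// no longer fits and a fresh block would be needed.
func (b *tinyBlock) alloc(size int) int {
	align := 1
	switch {
	case size&7 == 0:
		align = 8
	case size&3 == 0:
		align = 4
	case size&1 == 0:
		align = 2
	}
	off := (b.off + align - 1) &^ (align - 1)
	if off+size > maxTinySize {
		return -1
	}
	b.off = off + size
	return off
}

func main() {
	var b tinyBlock
	fmt.Println(b.alloc(1)) // 0
	fmt.Println(b.alloc(4)) // 4: bumped and aligned up from offset 1
	fmt.Println(b.alloc(8)) // 8
	fmt.Println(b.alloc(8)) // -1: does not fit, a new block is needed
}
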
-// implementation of new builtin
-func newobject(typ *_type) unsafe.Pointer {
- flags := uint32(0)
- if typ.kind&kindNoPointers != 0 {
- flags |= flagNoScan
- }
- return mallocgc(uintptr(typ.size), typ, flags)
-}
-
-// implementation of make builtin for slices
-func newarray(typ *_type, n uintptr) unsafe.Pointer {
- flags := uint32(0)
- if typ.kind&kindNoPointers != 0 {
- flags |= flagNoScan
- }
- if int(n) < 0 || (typ.size > 0 && n > maxmem/uintptr(typ.size)) {
- panic("runtime: allocation size out of range")
- }
- return mallocgc(uintptr(typ.size)*n, typ, flags)
-}
-
-// rawmem returns a chunk of pointerless memory. It is
-// not zeroed.
-func rawmem(size uintptr) unsafe.Pointer {
- return mallocgc(size, nil, flagNoScan|flagNoZero)
-}
-
-// round size up to next size class
-func goroundupsize(size uintptr) uintptr {
- if size < maxSmallSize {
- if size <= 1024-8 {
- return uintptr(class_to_size[size_to_class8[(size+7)>>3]])
- }
- return uintptr(class_to_size[size_to_class128[(size-1024+127)>>7]])
- }
- if size+pageSize < size {
- return size
- }
- return (size + pageSize - 1) &^ pageMask
-}
-
-func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
- c := mp.mcache
- rate := MemProfileRate
- if size < uintptr(rate) {
- // pick next profile time
- // If you change this, also change allocmcache.
- if rate > 0x3fffffff { // make 2*rate not overflow
- rate = 0x3fffffff
- }
- next := int32(fastrand1()) % (2 * int32(rate))
- // Subtract the "remainder" of the current allocation.
- // Otherwise objects that are close in size to sampling rate
- // will be under-sampled, because we consistently discard this remainder.
- next -= (int32(size) - c.next_sample)
- if next < 0 {
- next = 0
- }
- c.next_sample = next
- }
-
- mProf_Malloc(x, size)
-}
-
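
profilealloc schedules the next sampled allocation at a random byte distance of up to 2*rate, so that on average roughly one allocation per MemProfileRate bytes gets profiled. A sketch of that choice using math/rand in place of fastrand1; the 512 KiB rate is only an example value.

package main

import (
	"fmt"
	"math/rand"
)

// nextSample picks how many bytes of allocation to skip before the next
// sampled allocation: uniform in [0, 2*rate), so the average spacing is rate.
func nextSample(rate int32) int32 {
	if rate > 0x3fffffff { // keep 2*rate from overflowing int32
		rate = 0x3fffffff
	}
	return rand.Int31n(2 * rate)
}

func main() {
	rate := int32(512 * 1024)     // an example MemProfileRate-style setting
	fmt.Println(nextSample(rate)) // some value in [0, 1<<20)
}
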
-// force = 1 - do GC regardless of current heap usage
-// force = 2 - do GC and eager sweep
-func gogc(force int32) {
- // The gc is turned off (via enablegc) until the bootstrap has completed.
- // Also, malloc gets called in the guts of a number of libraries that might be
- // holding locks. To avoid deadlocks during stoptheworld, don't bother
- // trying to run gc while holding a lock. The next mallocgc without a lock
- // will do the gc instead.
- mp := acquirem()
- if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
- releasem(mp)
- return
- }
- releasem(mp)
- mp = nil
-
- semacquire(&worldsema, false)
-
- if force == 0 && memstats.heap_alloc < memstats.next_gc {
- // typically threads which lost the race to grab
- // worldsema exit here when gc is done.
- semrelease(&worldsema)
- return
- }
-
- // Ok, we're doing it! Stop everybody else
- startTime := nanotime()
- mp = acquirem()
- mp.gcing = 1
- releasem(mp)
- onM(stoptheworld)
- if mp != acquirem() {
- gothrow("gogc: rescheduled")
- }
-
- clearpools()
-
- // Run gc on the g0 stack. We do this so that the g stack
- // we're currently running on will no longer change. Cuts
- // the root set down a bit (g0 stacks are not scanned, and
- // we don't need to scan gc's internal state). We also
- // need to switch to g0 so we can shrink the stack.
- n := 1
- if debug.gctrace > 1 {
- n = 2
- }
- for i := 0; i < n; i++ {
- if i > 0 {
- startTime = nanotime()
- }
- // switch to g0, call gc, then switch back
- mp.scalararg[0] = uintptr(uint32(startTime)) // low 32 bits
- mp.scalararg[1] = uintptr(startTime >> 32) // high 32 bits
- if force >= 2 {
- mp.scalararg[2] = 1 // eagersweep
- } else {
- mp.scalararg[2] = 0
- }
- onM(gc_m)
- }
-
- // all done
- mp.gcing = 0
- semrelease(&worldsema)
- onM(starttheworld)
- releasem(mp)
- mp = nil
-
- // now that gc is done, kick off finalizer thread if needed
- if !concurrentSweep {
- // give the queued finalizers, if any, a chance to run
- Gosched()
- }
-}
-
-// GC runs a garbage collection.
-func GC() {
- gogc(2)
-}
-
-// linker-provided
-var noptrdata struct{}
-var enoptrdata struct{}
-var noptrbss struct{}
-var enoptrbss struct{}
-
-// SetFinalizer sets the finalizer associated with x to f.
-// When the garbage collector finds an unreachable block
-// with an associated finalizer, it clears the association and runs
-// f(x) in a separate goroutine. This makes x reachable again, but
-// now without an associated finalizer. Assuming that SetFinalizer
-// is not called again, the next time the garbage collector sees
-// that x is unreachable, it will free x.
-//
-// SetFinalizer(x, nil) clears any finalizer associated with x.
-//
-// The argument x must be a pointer to an object allocated by
-// calling new or by taking the address of a composite literal.
-// The argument f must be a function that takes a single argument
-// to which x's type can be assigned, and can have arbitrary ignored return
-// values. If either of these is not true, SetFinalizer aborts the
-// program.
-//
-// Finalizers are run in dependency order: if A points at B, both have
-// finalizers, and they are otherwise unreachable, only the finalizer
-// for A runs; once A is freed, the finalizer for B can run.
-// If a cyclic structure includes a block with a finalizer, that
-// cycle is not guaranteed to be garbage collected and the finalizer
-// is not guaranteed to run, because there is no ordering that
-// respects the dependencies.
-//
-// The finalizer for x is scheduled to run at some arbitrary time after
-// x becomes unreachable.
-// There is no guarantee that finalizers will run before a program exits,
-// so typically they are useful only for releasing non-memory resources
-// associated with an object during a long-running program.
-// For example, an os.File object could use a finalizer to close the
-// associated operating system file descriptor when a program discards
-// an os.File without calling Close, but it would be a mistake
-// to depend on a finalizer to flush an in-memory I/O buffer such as a
-// bufio.Writer, because the buffer would not be flushed at program exit.
-//
-// It is not guaranteed that a finalizer will run if the size of *x is
-// zero bytes.
-//
-// It is not guaranteed that a finalizer will run for objects allocated
-// in initializers for package-level variables. Such objects may be
-// linker-allocated, not heap-allocated.
-//
-// A single goroutine runs all finalizers for a program, sequentially.
-// If a finalizer must run for a long time, it should do so by starting
-// a new goroutine.
-func SetFinalizer(obj interface{}, finalizer interface{}) {
- e := (*eface)(unsafe.Pointer(&obj))
- etyp := e._type
- if etyp == nil {
- gothrow("runtime.SetFinalizer: first argument is nil")
- }
- if etyp.kind&kindMask != kindPtr {
- gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer")
- }
- ot := (*ptrtype)(unsafe.Pointer(etyp))
- if ot.elem == nil {
- gothrow("nil elem type!")
- }
-
- // find the containing object
- _, base, _ := findObject(e.data)
-
- if base == nil {
- // 0-length objects are okay.
- if e.data == unsafe.Pointer(&zerobase) {
- return
- }
-
- // Global initializers might be linker-allocated.
- // var Foo = &Object{}
- // func main() {
- // runtime.SetFinalizer(Foo, nil)
- // }
- // The relevant segments are: noptrdata, data, bss, noptrbss.
- // We cannot assume they are in any order or even contiguous,
- // due to external linking.
- if uintptr(unsafe.Pointer(&noptrdata)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrdata)) ||
- uintptr(unsafe.Pointer(&data)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&edata)) ||
- uintptr(unsafe.Pointer(&bss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&ebss)) ||
- uintptr(unsafe.Pointer(&noptrbss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) {
- return
- }
- gothrow("runtime.SetFinalizer: pointer not in allocated block")
- }
-
- if e.data != base {
- // As an implementation detail we allow setting finalizers for an inner byte
- // of an object if it could come from tiny alloc (see mallocgc for details).
- if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
- gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block")
- }
- }
-
- f := (*eface)(unsafe.Pointer(&finalizer))
- ftyp := f._type
- if ftyp == nil {
- // switch to M stack and remove finalizer
- mp := acquirem()
- mp.ptrarg[0] = e.data
- onM(removeFinalizer_m)
- releasem(mp)
- return
- }
-
- if ftyp.kind&kindMask != kindFunc {
- gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function")
- }
- ft := (*functype)(unsafe.Pointer(ftyp))
- ins := *(*[]*_type)(unsafe.Pointer(&ft.in))
- if ft.dotdotdot || len(ins) != 1 {
- gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
- }
- fint := ins[0]
- switch {
- case fint == etyp:
- // ok - same type
- goto okarg
- case fint.kind&kindMask == kindPtr:
- if (fint.x == nil || fint.x.name == nil || etyp.x == nil || etyp.x.name == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
- // ok - not same type, but both pointers,
- // one or the other is unnamed, and same element type, so assignable.
- goto okarg
- }
- case fint.kind&kindMask == kindInterface:
- ityp := (*interfacetype)(unsafe.Pointer(fint))
- if len(ityp.mhdr) == 0 {
- // ok - satisfies empty interface
- goto okarg
- }
- if _, ok := assertE2I2(ityp, obj); ok {
- goto okarg
- }
- }
- gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
-okarg:
- // compute size needed for return parameters
- nret := uintptr(0)
- for _, t := range *(*[]*_type)(unsafe.Pointer(&ft.out)) {
- nret = round(nret, uintptr(t.align)) + uintptr(t.size)
- }
- nret = round(nret, ptrSize)
-
- // make sure we have a finalizer goroutine
- createfing()
-
- // switch to M stack to add finalizer record
- mp := acquirem()
- mp.ptrarg[0] = f.data
- mp.ptrarg[1] = e.data
- mp.scalararg[0] = nret
- mp.ptrarg[2] = unsafe.Pointer(fint)
- mp.ptrarg[3] = unsafe.Pointer(ot)
- onM(setFinalizer_m)
- if mp.scalararg[0] != 1 {
- gothrow("runtime.SetFinalizer: finalizer already set")
- }
- releasem(mp)
-}
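
For orientation, a minimal usage sketch of the API implemented above; the fileWrapper type and openWrapped helper are hypothetical, used only for illustration.

package main

import (
	"os"
	"runtime"
)

// fileWrapper is a hypothetical type used only for this sketch.
type fileWrapper struct {
	f *os.File
}

func openWrapped(name string) (*fileWrapper, error) {
	f, err := os.Open(name)
	if err != nil {
		return nil, err
	}
	w := &fileWrapper{f: f}
	// If w becomes unreachable before an explicit Close, the finalizer
	// goroutine eventually closes the descriptor. The finalizer takes
	// the same pointer type as the object it is attached to.
	runtime.SetFinalizer(w, func(w *fileWrapper) { w.f.Close() })
	return w, nil
}

func main() {
	if w, err := openWrapped(os.Args[0]); err == nil {
		_ = w
	}
}
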
-
-// round n up to a multiple of a. a must be a power of 2.
-func round(n, a uintptr) uintptr {
- return (n + a - 1) &^ (a - 1)
-}
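
Worked example of the bit trick: round(13, 8) computes (13+7) &^ 7 = 20 &^ 7 = 16, the next multiple of 8, while round(16, 8) computes 23 &^ 7 = 16 and leaves an already-aligned value unchanged.
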
-
-// Look up pointer v in heap. Return the span containing the object,
-// the start of the object, and the size of the object. If the object
-// does not exist, return nil, nil, 0.
-func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
- c := gomcache()
- c.local_nlookup++
- if ptrSize == 4 && c.local_nlookup >= 1<<30 {
- // purge cache stats to prevent overflow
- lock(&mheap_.lock)
- purgecachedstats(c)
- unlock(&mheap_.lock)
- }
-
- // find span
- arena_start := uintptr(unsafe.Pointer(mheap_.arena_start))
- arena_used := uintptr(unsafe.Pointer(mheap_.arena_used))
- if uintptr(v) < arena_start || uintptr(v) >= arena_used {
- return
- }
- p := uintptr(v) >> pageShift
- q := p - arena_start>>pageShift
- s = *(**mspan)(add(unsafe.Pointer(mheap_.spans), q*ptrSize))
- if s == nil {
- return
- }
- x = unsafe.Pointer(uintptr(s.start) << pageShift)
-
- if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
- s = nil
- x = nil
- return
- }
-
- n = uintptr(s.elemsize)
- if s.sizeclass != 0 {
- x = add(x, (uintptr(v)-uintptr(x))/n*n)
- }
- return
-}
-
-var fingCreate uint32
-
-func createfing() {
- // start the finalizer goroutine exactly once
- if fingCreate == 0 && cas(&fingCreate, 0, 1) {
- go runfinq()
- }
-}
-
-// This is the goroutine that runs all of the finalizers
-func runfinq() {
- var (
- frame unsafe.Pointer
- framecap uintptr
- )
-
- for {
- lock(&finlock)
- fb := finq
- finq = nil
- if fb == nil {
- gp := getg()
- fing = gp
- fingwait = true
- gp.issystem = true
- goparkunlock(&finlock, "finalizer wait")
- gp.issystem = false
- continue
- }
- unlock(&finlock)
- if raceenabled {
- racefingo()
- }
- for fb != nil {
- for i := int32(0); i < fb.cnt; i++ {
- f := (*finalizer)(add(unsafe.Pointer(&fb.fin), uintptr(i)*unsafe.Sizeof(finalizer{})))
-
- framesz := unsafe.Sizeof((interface{})(nil)) + uintptr(f.nret)
- if framecap < framesz {
- // The frame does not contain pointers interesting for GC,
- // all not yet finalized objects are stored in finq.
- // If we do not mark it as FlagNoScan,
- // the last finalized object is not collected.
- frame = mallocgc(framesz, nil, flagNoScan)
- framecap = framesz
- }
-
- if f.fint == nil {
- gothrow("missing type in runfinq")
- }
- switch f.fint.kind & kindMask {
- case kindPtr:
- // direct use of pointer
- *(*unsafe.Pointer)(frame) = f.arg
- case kindInterface:
- ityp := (*interfacetype)(unsafe.Pointer(f.fint))
- // set up with empty interface
- (*eface)(frame)._type = &f.ot.typ
- (*eface)(frame).data = f.arg
- if len(ityp.mhdr) != 0 {
- // convert to interface with methods
- // this conversion is guaranteed to succeed - we checked in SetFinalizer
- *(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame))
- }
- default:
- gothrow("bad kind in runfinq")
- }
- reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
-
- // drop finalizer queue references to finalized object
- f.fn = nil
- f.arg = nil
- f.ot = nil
- }
- fb.cnt = 0
- next := fb.next
- lock(&finlock)
- fb.next = finc
- finc = fb
- unlock(&finlock)
- fb = next
- }
- }
-}
-
-var persistent struct {
- lock mutex
- pos unsafe.Pointer
- end unsafe.Pointer
-}
-
-// Wrapper around sysAlloc that can allocate small chunks.
-// There is no associated free operation.
-// Intended for things like function/type/debug-related persistent data.
-// If align is 0, uses default align (currently 8).
-func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
- const (
- chunk = 256 << 10
- maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
- )
-
- if align != 0 {
- if align&(align-1) != 0 {
- gothrow("persistentalloc: align is not a power of 2")
- }
- if align > _PageSize {
- gothrow("persistentalloc: align is too large")
- }
- } else {
- align = 8
- }
-
- if size >= maxBlock {
- return sysAlloc(size, stat)
- }
-
- lock(&persistent.lock)
- persistent.pos = roundup(persistent.pos, align)
- if uintptr(persistent.pos)+size > uintptr(persistent.end) {
- persistent.pos = sysAlloc(chunk, &memstats.other_sys)
- if persistent.pos == nil {
- unlock(&persistent.lock)
- gothrow("runtime: cannot allocate memory")
- }
- persistent.end = add(persistent.pos, chunk)
- }
- p := persistent.pos
- persistent.pos = add(persistent.pos, size)
- unlock(&persistent.lock)
-
- if stat != &memstats.other_sys {
- xadd64(stat, int64(size))
- xadd64(&memstats.other_sys, -int64(size))
- }
- return p
-}
diff --git a/libgo/go/runtime/malloc_test.go b/libgo/go/runtime/malloc_test.go
index 054f6a7..df6a0e5 100644
--- a/libgo/go/runtime/malloc_test.go
+++ b/libgo/go/runtime/malloc_test.go
@@ -13,17 +13,74 @@ import (
)
func TestMemStats(t *testing.T) {
+ t.Skip("skipping test with gccgo")
// Test that MemStats has sane values.
st := new(MemStats)
ReadMemStats(st)
- if st.HeapSys == 0 || /* st.StackSys == 0 || */ st.MSpanSys == 0 || st.MCacheSys == 0 ||
- st.BuckHashSys == 0 || st.GCSys == 0 || st.OtherSys == 0 {
- t.Fatalf("Zero sys value: %+v", *st)
+
+ // Everything except HeapReleased and HeapIdle, because they indeed can be 0.
+ if st.Alloc == 0 || st.TotalAlloc == 0 || st.Sys == 0 || st.Lookups == 0 ||
+ st.Mallocs == 0 || st.Frees == 0 || st.HeapAlloc == 0 || st.HeapSys == 0 ||
+ st.HeapInuse == 0 || st.HeapObjects == 0 || st.StackInuse == 0 ||
+ st.StackSys == 0 || st.MSpanInuse == 0 || st.MSpanSys == 0 || st.MCacheInuse == 0 ||
+ st.MCacheSys == 0 || st.BuckHashSys == 0 || st.GCSys == 0 || st.OtherSys == 0 ||
+ st.NextGC == 0 || st.NumGC == 0 {
+ t.Fatalf("Zero value: %+v", *st)
+ }
+
+ if st.Alloc > 1e10 || st.TotalAlloc > 1e11 || st.Sys > 1e10 || st.Lookups > 1e10 ||
+ st.Mallocs > 1e10 || st.Frees > 1e10 || st.HeapAlloc > 1e10 || st.HeapSys > 1e10 ||
+ st.HeapIdle > 1e10 || st.HeapInuse > 1e10 || st.HeapObjects > 1e10 || st.StackInuse > 1e10 ||
+ st.StackSys > 1e10 || st.MSpanInuse > 1e10 || st.MSpanSys > 1e10 || st.MCacheInuse > 1e10 ||
+ st.MCacheSys > 1e10 || st.BuckHashSys > 1e10 || st.GCSys > 1e10 || st.OtherSys > 1e10 ||
+ st.NextGC > 1e10 || st.NumGC > 1e9 || st.PauseTotalNs > 1e11 {
+ t.Fatalf("Insanely high value (overflow?): %+v", *st)
}
if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
st.BuckHashSys+st.GCSys+st.OtherSys {
t.Fatalf("Bad sys value: %+v", *st)
}
+
+ if st.HeapIdle+st.HeapInuse != st.HeapSys {
+ t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
+ }
+
+ if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
+ t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
+ }
+
+ var pauseTotal uint64
+ for _, pause := range st.PauseNs {
+ pauseTotal += pause
+ }
+ if int(st.NumGC) < len(st.PauseNs) {
+ // We have all pauses, so this should be exact.
+ if st.PauseTotalNs != pauseTotal {
+ t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
+ }
+ } else {
+ if st.PauseTotalNs < pauseTotal {
+ t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
+ }
+ }
+}
+
+func TestStringConcatenationAllocs(t *testing.T) {
+ t.Skip("skipping test with gccgo")
+ n := testing.AllocsPerRun(1e3, func() {
+ b := make([]byte, 10)
+ for i := 0; i < 10; i++ {
+ b[i] = byte(i) + '0'
+ }
+ s := "foo" + string(b)
+ if want := "foo0123456789"; s != want {
+ t.Fatalf("want %v, got %v", want, s)
+ }
+ })
+ // Only string concatenation allocates.
+ if n != 1 {
+ t.Fatalf("want 1 allocation, got %v", n)
+ }
}
var mallocSink uintptr
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index 9ed183b..ed0347a 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -391,7 +391,7 @@ func TestMapNanGrowIterator(t *testing.T) {
nan := math.NaN()
const nBuckets = 16
// To fill nBuckets buckets takes LOAD * nBuckets keys.
- nKeys := int(nBuckets * *runtime.HashLoad)
+ nKeys := int(nBuckets * /* *runtime.HashLoad */ 6.5)
// Get map to full point with nan keys.
for i := 0; i < nKeys; i++ {
@@ -537,6 +537,61 @@ func TestMapStringBytesLookup(t *testing.T) {
}
}
+func TestMapLargeKeyNoPointer(t *testing.T) {
+ const (
+ I = 1000
+ N = 64
+ )
+ type T [N]int
+ m := make(map[T]int)
+ for i := 0; i < I; i++ {
+ var v T
+ for j := 0; j < N; j++ {
+ v[j] = i + j
+ }
+ m[v] = i
+ }
+ runtime.GC()
+ for i := 0; i < I; i++ {
+ var v T
+ for j := 0; j < N; j++ {
+ v[j] = i + j
+ }
+ if m[v] != i {
+ t.Fatalf("corrupted map: want %+v, got %+v", i, m[v])
+ }
+ }
+}
+
+func TestMapLargeValNoPointer(t *testing.T) {
+ const (
+ I = 1000
+ N = 64
+ )
+ type T [N]int
+ m := make(map[int]T)
+ for i := 0; i < I; i++ {
+ var v T
+ for j := 0; j < N; j++ {
+ v[j] = i + j
+ }
+ m[i] = v
+ }
+ runtime.GC()
+ for i := 0; i < I; i++ {
+ var v T
+ for j := 0; j < N; j++ {
+ v[j] = i + j
+ }
+ v1 := m[i]
+ for j := 0; j < N; j++ {
+ if v1[j] != v[j] {
+ t.Fatalf("corrupted map: want %+v, got %+v", v, v1)
+ }
+ }
+ }
+}
+
func benchmarkMapPop(b *testing.B, n int) {
m := map[int]int{}
for i := 0; i < b.N; i++ {
@@ -557,3 +612,14 @@ func benchmarkMapPop(b *testing.B, n int) {
func BenchmarkMapPop100(b *testing.B) { benchmarkMapPop(b, 100) }
func BenchmarkMapPop1000(b *testing.B) { benchmarkMapPop(b, 1000) }
func BenchmarkMapPop10000(b *testing.B) { benchmarkMapPop(b, 10000) }
+
+func TestNonEscapingMap(t *testing.T) {
+ t.Skip("does not work on gccgo without better escape analysis")
+ n := testing.AllocsPerRun(1000, func() {
+ m := make(map[int]int)
+ m[0] = 0
+ })
+ if n != 0 {
+ t.Fatalf("want 0 allocs, got %v", n)
+ }
+}
diff --git a/libgo/go/runtime/mapspeed_test.go b/libgo/go/runtime/mapspeed_test.go
index 119eb3f..ac93119 100644
--- a/libgo/go/runtime/mapspeed_test.go
+++ b/libgo/go/runtime/mapspeed_test.go
@@ -234,6 +234,15 @@ func BenchmarkNewEmptyMap(b *testing.B) {
}
}
+func BenchmarkNewSmallMap(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ m := make(map[int]int)
+ m[0] = 0
+ m[1] = 1
+ }
+}
+
func BenchmarkMapIter(b *testing.B) {
m := make(map[int]bool)
for i := 0; i < 8; i++ {
@@ -298,3 +307,22 @@ func BenchmarkSmallKeyMap(b *testing.B) {
_ = m[5]
}
}
+
+type ComplexAlgKey struct {
+ a, b, c int64
+ _ int
+ d int32
+ _ int
+ e string
+ _ int
+ f, g, h int64
+}
+
+func BenchmarkComplexAlgMap(b *testing.B) {
+ m := make(map[ComplexAlgKey]bool)
+ var k ComplexAlgKey
+ m[k] = true
+ for i := 0; i < b.N; i++ {
+ _ = m[k]
+ }
+}
diff --git a/libgo/go/runtime/mem.go b/libgo/go/runtime/mem.go
index fb35535..b41d741 100644
--- a/libgo/go/runtime/mem.go
+++ b/libgo/go/runtime/mem.go
@@ -41,13 +41,15 @@ type MemStats struct {
OtherSys uint64 // other system allocations
// Garbage collector statistics.
- NextGC uint64 // next run in HeapAlloc time (bytes)
- LastGC uint64 // last run in absolute time (ns)
- PauseTotalNs uint64
- PauseNs [256]uint64 // circular buffer of recent GC pause times, most recent at [(NumGC+255)%256]
- NumGC uint32
- EnableGC bool
- DebugGC bool
+ NextGC uint64 // next run in HeapAlloc time (bytes)
+ LastGC uint64 // last run in absolute time (ns)
+ PauseTotalNs uint64
+ PauseNs [256]uint64 // circular buffer of recent GC pause times, most recent at [(NumGC+255)%256]
+ PauseEnd [256]uint64 // circular buffer of recent GC pause end times
+ NumGC uint32
+ GCCPUFraction float64 // fraction of CPU time used by GC
+ EnableGC bool
+ DebugGC bool
// Per-size allocation statistics.
// 61 is NumSizeClasses in the C code.
diff --git a/libgo/go/runtime/memmove_test.go b/libgo/go/runtime/memmove_test.go
deleted file mode 100644
index ffda4fe..0000000
--- a/libgo/go/runtime/memmove_test.go
+++ /dev/null
@@ -1,295 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime_test
-
-import (
- . "runtime"
- "testing"
-)
-
-func TestMemmove(t *testing.T) {
- size := 256
- if testing.Short() {
- size = 128 + 16
- }
- src := make([]byte, size)
- dst := make([]byte, size)
- for i := 0; i < size; i++ {
- src[i] = byte(128 + (i & 127))
- }
- for i := 0; i < size; i++ {
- dst[i] = byte(i & 127)
- }
- for n := 0; n <= size; n++ {
- for x := 0; x <= size-n; x++ { // offset in src
- for y := 0; y <= size-n; y++ { // offset in dst
- copy(dst[y:y+n], src[x:x+n])
- for i := 0; i < y; i++ {
- if dst[i] != byte(i&127) {
- t.Fatalf("prefix dst[%d] = %d", i, dst[i])
- }
- }
- for i := y; i < y+n; i++ {
- if dst[i] != byte(128+((i-y+x)&127)) {
- t.Fatalf("copied dst[%d] = %d", i, dst[i])
- }
- dst[i] = byte(i & 127) // reset dst
- }
- for i := y + n; i < size; i++ {
- if dst[i] != byte(i&127) {
- t.Fatalf("suffix dst[%d] = %d", i, dst[i])
- }
- }
- }
- }
- }
-}
-
-func TestMemmoveAlias(t *testing.T) {
- size := 256
- if testing.Short() {
- size = 128 + 16
- }
- buf := make([]byte, size)
- for i := 0; i < size; i++ {
- buf[i] = byte(i)
- }
- for n := 0; n <= size; n++ {
- for x := 0; x <= size-n; x++ { // src offset
- for y := 0; y <= size-n; y++ { // dst offset
- copy(buf[y:y+n], buf[x:x+n])
- for i := 0; i < y; i++ {
- if buf[i] != byte(i) {
- t.Fatalf("prefix buf[%d] = %d", i, buf[i])
- }
- }
- for i := y; i < y+n; i++ {
- if buf[i] != byte(i-y+x) {
- t.Fatalf("copied buf[%d] = %d", i, buf[i])
- }
- buf[i] = byte(i) // reset buf
- }
- for i := y + n; i < size; i++ {
- if buf[i] != byte(i) {
- t.Fatalf("suffix buf[%d] = %d", i, buf[i])
- }
- }
- }
- }
- }
-}
-
-func bmMemmove(b *testing.B, n int) {
- x := make([]byte, n)
- y := make([]byte, n)
- b.SetBytes(int64(n))
- for i := 0; i < b.N; i++ {
- copy(x, y)
- }
-}
-
-func BenchmarkMemmove0(b *testing.B) { bmMemmove(b, 0) }
-func BenchmarkMemmove1(b *testing.B) { bmMemmove(b, 1) }
-func BenchmarkMemmove2(b *testing.B) { bmMemmove(b, 2) }
-func BenchmarkMemmove3(b *testing.B) { bmMemmove(b, 3) }
-func BenchmarkMemmove4(b *testing.B) { bmMemmove(b, 4) }
-func BenchmarkMemmove5(b *testing.B) { bmMemmove(b, 5) }
-func BenchmarkMemmove6(b *testing.B) { bmMemmove(b, 6) }
-func BenchmarkMemmove7(b *testing.B) { bmMemmove(b, 7) }
-func BenchmarkMemmove8(b *testing.B) { bmMemmove(b, 8) }
-func BenchmarkMemmove9(b *testing.B) { bmMemmove(b, 9) }
-func BenchmarkMemmove10(b *testing.B) { bmMemmove(b, 10) }
-func BenchmarkMemmove11(b *testing.B) { bmMemmove(b, 11) }
-func BenchmarkMemmove12(b *testing.B) { bmMemmove(b, 12) }
-func BenchmarkMemmove13(b *testing.B) { bmMemmove(b, 13) }
-func BenchmarkMemmove14(b *testing.B) { bmMemmove(b, 14) }
-func BenchmarkMemmove15(b *testing.B) { bmMemmove(b, 15) }
-func BenchmarkMemmove16(b *testing.B) { bmMemmove(b, 16) }
-func BenchmarkMemmove32(b *testing.B) { bmMemmove(b, 32) }
-func BenchmarkMemmove64(b *testing.B) { bmMemmove(b, 64) }
-func BenchmarkMemmove128(b *testing.B) { bmMemmove(b, 128) }
-func BenchmarkMemmove256(b *testing.B) { bmMemmove(b, 256) }
-func BenchmarkMemmove512(b *testing.B) { bmMemmove(b, 512) }
-func BenchmarkMemmove1024(b *testing.B) { bmMemmove(b, 1024) }
-func BenchmarkMemmove2048(b *testing.B) { bmMemmove(b, 2048) }
-func BenchmarkMemmove4096(b *testing.B) { bmMemmove(b, 4096) }
-
-func TestMemclr(t *testing.T) {
- size := 512
- if testing.Short() {
- size = 128 + 16
- }
- mem := make([]byte, size)
- for i := 0; i < size; i++ {
- mem[i] = 0xee
- }
- for n := 0; n < size; n++ {
- for x := 0; x <= size-n; x++ { // offset in mem
- MemclrBytes(mem[x : x+n])
- for i := 0; i < x; i++ {
- if mem[i] != 0xee {
- t.Fatalf("overwrite prefix mem[%d] = %d", i, mem[i])
- }
- }
- for i := x; i < x+n; i++ {
- if mem[i] != 0 {
- t.Fatalf("failed clear mem[%d] = %d", i, mem[i])
- }
- mem[i] = 0xee
- }
- for i := x + n; i < size; i++ {
- if mem[i] != 0xee {
- t.Fatalf("overwrite suffix mem[%d] = %d", i, mem[i])
- }
- }
- }
- }
-}
-
-func bmMemclr(b *testing.B, n int) {
- x := make([]byte, n)
- b.SetBytes(int64(n))
- for i := 0; i < b.N; i++ {
- MemclrBytes(x)
- }
-}
-func BenchmarkMemclr5(b *testing.B) { bmMemclr(b, 5) }
-func BenchmarkMemclr16(b *testing.B) { bmMemclr(b, 16) }
-func BenchmarkMemclr64(b *testing.B) { bmMemclr(b, 64) }
-func BenchmarkMemclr256(b *testing.B) { bmMemclr(b, 256) }
-func BenchmarkMemclr4096(b *testing.B) { bmMemclr(b, 4096) }
-func BenchmarkMemclr65536(b *testing.B) { bmMemclr(b, 65536) }
-
-func BenchmarkClearFat8(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [8 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat12(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [12 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat16(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [16 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat24(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [24 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat32(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [32 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat64(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [64 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat128(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [128 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat256(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [256 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat512(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [512 / 4]uint32
- _ = x
- }
-}
-func BenchmarkClearFat1024(b *testing.B) {
- for i := 0; i < b.N; i++ {
- var x [1024 / 4]uint32
- _ = x
- }
-}
-
-func BenchmarkCopyFat8(b *testing.B) {
- var x [8 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat12(b *testing.B) {
- var x [12 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat16(b *testing.B) {
- var x [16 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat24(b *testing.B) {
- var x [24 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat32(b *testing.B) {
- var x [32 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat64(b *testing.B) {
- var x [64 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat128(b *testing.B) {
- var x [128 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat256(b *testing.B) {
- var x [256 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat512(b *testing.B) {
- var x [512 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
-func BenchmarkCopyFat1024(b *testing.B) {
- var x [1024 / 4]uint32
- for i := 0; i < b.N; i++ {
- y := x
- _ = y
- }
-}
diff --git a/libgo/go/runtime/mfinal_test.go b/libgo/go/runtime/mfinal_test.go
index ab7c8ae..38c2623 100644
--- a/libgo/go/runtime/mfinal_test.go
+++ b/libgo/go/runtime/mfinal_test.go
@@ -177,9 +177,6 @@ func adjChunks() (*objtype, *objtype) {
// Make sure an empty slice on the stack doesn't pin the next object in memory.
func TestEmptySlice(t *testing.T) {
- if true { // disable until bug 7564 is fixed.
- return
- }
if runtime.Compiler == "gccgo" {
t.Skip("skipping for gccgo")
}
diff --git a/libgo/go/runtime/mgc0.go b/libgo/go/runtime/mgc0.go
deleted file mode 100644
index cbf5e9c..0000000
--- a/libgo/go/runtime/mgc0.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-// Called from C. Returns the Go type *m.
-func gc_m_ptr(ret *interface{}) {
- *ret = (*m)(nil)
-}
-
-// Called from C. Returns the Go type *g.
-func gc_g_ptr(ret *interface{}) {
- *ret = (*g)(nil)
-}
-
-// Called from C. Returns the Go type *itab.
-func gc_itab_ptr(ret *interface{}) {
- *ret = (*itab)(nil)
-}
-
-func gc_unixnanotime(now *int64) {
- sec, nsec := timenow()
- *now = sec*1e9 + int64(nsec)
-}
-
-func freeOSMemory() {
- gogc(2) // force GC and do eager sweep
- onM(scavenge_m)
-}
-
-var poolcleanup func()
-
-func registerPoolCleanup(f func()) {
- poolcleanup = f
-}
-
-func clearpools() {
- // clear sync.Pools
- if poolcleanup != nil {
- poolcleanup()
- }
-
- for _, p := range &allp {
- if p == nil {
- break
- }
- // clear tinyalloc pool
- if c := p.mcache; c != nil {
- c.tiny = nil
- c.tinysize = 0
-
- // disconnect cached list before dropping it on the floor,
- // so that a dangling ref to one entry does not pin all of them.
- var sg, sgnext *sudog
- for sg = c.sudogcache; sg != nil; sg = sgnext {
- sgnext = sg.next
- sg.next = nil
- }
- c.sudogcache = nil
- }
-
- // clear defer pools
- for i := range p.deferpool {
- // disconnect cached list before dropping it on the floor,
- // so that a dangling ref to one entry does not pin all of them.
- var d, dlink *_defer
- for d = p.deferpool[i]; d != nil; d = dlink {
- dlink = d.link
- d.link = nil
- }
- p.deferpool[i] = nil
- }
- }
-}
-
-func gosweepone() uintptr
-func gosweepdone() bool
-
-func bgsweep() {
- getg().issystem = true
- for {
- for gosweepone() != ^uintptr(0) {
- sweep.nbgsweep++
- Gosched()
- }
- lock(&gclock)
- if !gosweepdone() {
- // This can happen if a GC runs between
- // gosweepone returning ^0 above
- // and the lock being acquired.
- unlock(&gclock)
- continue
- }
- sweep.parked = true
- goparkunlock(&gclock, "GC sweep wait")
- }
-}
-
-// NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
-// but if we do that, Go inserts a write barrier on *dst = src.
-//go:nosplit
-func writebarrierptr(dst *uintptr, src uintptr) {
- *dst = src
-}
-
-//go:nosplit
-func writebarrierstring(dst *[2]uintptr, src [2]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
-}
-
-//go:nosplit
-func writebarrierslice(dst *[3]uintptr, src [3]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
- dst[2] = src[2]
-}
-
-//go:nosplit
-func writebarrieriface(dst *[2]uintptr, src [2]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
-}
-
-//go:nosplit
-func writebarrierfat2(dst *[2]uintptr, _ *byte, src [2]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
-}
-
-//go:nosplit
-func writebarrierfat3(dst *[3]uintptr, _ *byte, src [3]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
- dst[2] = src[2]
-}
-
-//go:nosplit
-func writebarrierfat4(dst *[4]uintptr, _ *byte, src [4]uintptr) {
- dst[0] = src[0]
- dst[1] = src[1]
- dst[2] = src[2]
- dst[3] = src[3]
-}
-
-//go:nosplit
-func writebarrierfat(typ *_type, dst, src unsafe.Pointer) {
- memmove(dst, src, typ.size)
-}
diff --git a/libgo/go/runtime/mprof.go b/libgo/go/runtime/mprof.go
deleted file mode 100644
index f4da45f..0000000
--- a/libgo/go/runtime/mprof.go
+++ /dev/null
@@ -1,668 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Malloc profiling.
-// Patterned after tcmalloc's algorithms; shorter code.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-// NOTE(rsc): Everything here could use cas if contention became an issue.
-var proflock mutex
-
-// All memory allocations are local and do not escape outside of the profiler.
-// The profiler is forbidden from referring to garbage-collected memory.
-
-const (
- // profile types
- memProfile bucketType = 1 + iota
- blockProfile
-
- // size of bucket hash table
- buckHashSize = 179999
-
- // max depth of stack to record in bucket
- maxStack = 32
-)
-
-type bucketType int
-
-// A bucket holds per-call-stack profiling information.
-// The representation is a bit sleazy, inherited from C.
-// This struct defines the bucket header. It is followed in
-// memory by the stack words and then the actual record
-// data, either a memRecord or a blockRecord.
-//
-// Per-call-stack profiling information.
-// Lookup by hashing call stack into a linked-list hash table.
-type bucket struct {
- next *bucket
- allnext *bucket
-	typ     bucketType // memProfile or blockProfile
- hash uintptr
- size uintptr
- nstk uintptr
-}
-
-// A memRecord is the bucket data for a bucket of type memProfile,
-// part of the memory profile.
-type memRecord struct {
- // The following complex 3-stage scheme of stats accumulation
- // is required to obtain a consistent picture of mallocs and frees
- // for some point in time.
-	// The problem is that mallocs come in real time, while frees
-	// come only after a GC during concurrent sweeping. So if we
-	// naively counted them, we would get a skew toward mallocs.
- //
- // Mallocs are accounted in recent stats.
- // Explicit frees are accounted in recent stats.
- // GC frees are accounted in prev stats.
- // After GC prev stats are added to final stats and
- // recent stats are moved into prev stats.
- allocs uintptr
- frees uintptr
- alloc_bytes uintptr
- free_bytes uintptr
-
- // changes between next-to-last GC and last GC
- prev_allocs uintptr
- prev_frees uintptr
- prev_alloc_bytes uintptr
- prev_free_bytes uintptr
-
- // changes since last GC
- recent_allocs uintptr
- recent_frees uintptr
- recent_alloc_bytes uintptr
- recent_free_bytes uintptr
-}
-
-// A blockRecord is the bucket data for a bucket of type blockProfile,
-// part of the blocking profile.
-type blockRecord struct {
- count int64
- cycles int64
-}
-
-var (
- mbuckets *bucket // memory profile buckets
- bbuckets *bucket // blocking profile buckets
- buckhash *[179999]*bucket
- bucketmem uintptr
-)
-
-// newBucket allocates a bucket with the given type and number of stack entries.
-func newBucket(typ bucketType, nstk int) *bucket {
- size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
- switch typ {
- default:
- gothrow("invalid profile bucket type")
- case memProfile:
- size += unsafe.Sizeof(memRecord{})
- case blockProfile:
- size += unsafe.Sizeof(blockRecord{})
- }
-
- b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
- bucketmem += size
- b.typ = typ
- b.nstk = uintptr(nstk)
- return b
-}
-
-// stk returns the slice in b holding the stack.
-func (b *bucket) stk() []uintptr {
- stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
- return stk[:b.nstk:b.nstk]
-}
-
-// mp returns the memRecord associated with the memProfile bucket b.
-func (b *bucket) mp() *memRecord {
- if b.typ != memProfile {
- gothrow("bad use of bucket.mp")
- }
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
- return (*memRecord)(data)
-}
-
-// bp returns the blockRecord associated with the blockProfile bucket b.
-func (b *bucket) bp() *blockRecord {
- if b.typ != blockProfile {
- gothrow("bad use of bucket.bp")
- }
- data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
- return (*blockRecord)(data)
-}
-
-// Return the bucket for stk[0:nstk], allocating a new bucket if needed.
-func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
- if buckhash == nil {
- buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
- if buckhash == nil {
- gothrow("runtime: cannot allocate memory")
- }
- }
-
- // Hash stack.
- var h uintptr
- for _, pc := range stk {
- h += pc
- h += h << 10
- h ^= h >> 6
- }
- // hash in size
- h += size
- h += h << 10
- h ^= h >> 6
- // finalize
- h += h << 3
- h ^= h >> 11
-
- i := int(h % buckHashSize)
- for b := buckhash[i]; b != nil; b = b.next {
- if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
- return b
- }
- }
-
- if !alloc {
- return nil
- }
-
- // Create new bucket.
- b := newBucket(typ, len(stk))
- copy(b.stk(), stk)
- b.hash = h
- b.size = size
- b.next = buckhash[i]
- buckhash[i] = b
- if typ == memProfile {
- b.allnext = mbuckets
- mbuckets = b
- } else {
- b.allnext = bbuckets
- bbuckets = b
- }
- return b
-}
-
-func sysAlloc(n uintptr, stat *uint64) unsafe.Pointer
-
-func eqslice(x, y []uintptr) bool {
- if len(x) != len(y) {
- return false
- }
- for i, xi := range x {
- if xi != y[i] {
- return false
- }
- }
- return true
-}
-
-func mprof_GC() {
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- mp.allocs += mp.prev_allocs
- mp.frees += mp.prev_frees
- mp.alloc_bytes += mp.prev_alloc_bytes
- mp.free_bytes += mp.prev_free_bytes
-
- mp.prev_allocs = mp.recent_allocs
- mp.prev_frees = mp.recent_frees
- mp.prev_alloc_bytes = mp.recent_alloc_bytes
- mp.prev_free_bytes = mp.recent_free_bytes
-
- mp.recent_allocs = 0
- mp.recent_frees = 0
- mp.recent_alloc_bytes = 0
- mp.recent_free_bytes = 0
- }
-}
-
-// Record that a gc just happened: all the 'recent' statistics are now real.
-func mProf_GC() {
- lock(&proflock)
- mprof_GC()
- unlock(&proflock)
-}
-
-// Called by malloc to record a profiled block.
-func mProf_Malloc(p unsafe.Pointer, size uintptr) {
- var stk [maxStack]uintptr
- nstk := callers(4, &stk[0], len(stk))
- lock(&proflock)
- b := stkbucket(memProfile, size, stk[:nstk], true)
- mp := b.mp()
- mp.recent_allocs++
- mp.recent_alloc_bytes += size
- unlock(&proflock)
-
- // Setprofilebucket locks a bunch of other mutexes, so we call it outside of proflock.
- // This reduces potential contention and chances of deadlocks.
- // Since the object must be alive during call to mProf_Malloc,
- // it's fine to do this non-atomically.
- setprofilebucket(p, b)
-}
-
-func setprofilebucket_m() // mheap.c
-
-func setprofilebucket(p unsafe.Pointer, b *bucket) {
- g := getg()
- g.m.ptrarg[0] = p
- g.m.ptrarg[1] = unsafe.Pointer(b)
- onM(setprofilebucket_m)
-}
-
-// Called when freeing a profiled block.
-func mProf_Free(b *bucket, size uintptr, freed bool) {
- lock(&proflock)
- mp := b.mp()
- if freed {
- mp.recent_frees++
- mp.recent_free_bytes += size
- } else {
- mp.prev_frees++
- mp.prev_free_bytes += size
- }
- unlock(&proflock)
-}
-
-var blockprofilerate uint64 // in CPU ticks
-
-// SetBlockProfileRate controls the fraction of goroutine blocking events
-// that are reported in the blocking profile. The profiler aims to sample
-// an average of one blocking event per rate nanoseconds spent blocked.
-//
-// To include every blocking event in the profile, pass rate = 1.
-// To turn off profiling entirely, pass rate <= 0.
-func SetBlockProfileRate(rate int) {
- var r int64
- if rate <= 0 {
- r = 0 // disable profiling
- } else if rate == 1 {
- r = 1 // profile everything
- } else {
- // convert ns to cycles, use float64 to prevent overflow during multiplication
- r = int64(float64(rate) * float64(tickspersecond()) / (1000 * 1000 * 1000))
- if r == 0 {
- r = 1
- }
- }
-
- atomicstore64(&blockprofilerate, uint64(r))
-}
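
A hedged sketch of typical application-side use of this knob together with runtime/pprof; the rate and the output file name are illustrative.

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// Sample roughly one blocking event per millisecond spent blocked.
	runtime.SetBlockProfileRate(1000000)

	// ... run the blocking workload here ...

	f, err := os.Create("block.prof")
	if err != nil {
		return
	}
	defer f.Close()
	// "block" is the predefined profile fed by the buckets above.
	pprof.Lookup("block").WriteTo(f, 0)
}
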
-
-func blockevent(cycles int64, skip int) {
- if cycles <= 0 {
- cycles = 1
- }
- rate := int64(atomicload64(&blockprofilerate))
- if rate <= 0 || (rate > cycles && int64(fastrand1())%rate > cycles) {
- return
- }
- gp := getg()
- var nstk int
- var stk [maxStack]uintptr
- if gp.m.curg == nil || gp.m.curg == gp {
- nstk = callers(skip, &stk[0], len(stk))
- } else {
- nstk = gcallers(gp.m.curg, skip, &stk[0], len(stk))
- }
- lock(&proflock)
- b := stkbucket(blockProfile, 0, stk[:nstk], true)
- b.bp().count++
- b.bp().cycles += cycles
- unlock(&proflock)
-}
-
-// Go interface to profile data.
-
-// A StackRecord describes a single execution stack.
-type StackRecord struct {
- Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
-}
-
-// Stack returns the stack trace associated with the record,
-// a prefix of r.Stack0.
-func (r *StackRecord) Stack() []uintptr {
- for i, v := range r.Stack0 {
- if v == 0 {
- return r.Stack0[0:i]
- }
- }
- return r.Stack0[0:]
-}
-
-// MemProfileRate controls the fraction of memory allocations
-// that are recorded and reported in the memory profile.
-// The profiler aims to sample an average of
-// one allocation per MemProfileRate bytes allocated.
-//
-// To include every allocated block in the profile, set MemProfileRate to 1.
-// To turn off profiling entirely, set MemProfileRate to 0.
-//
-// The tools that process the memory profiles assume that the
-// profile rate is constant across the lifetime of the program
-// and equal to the current value. Programs that change the
-// memory profiling rate should do so just once, as early as
-// possible in the execution of the program (for example,
-// at the beginning of main).
-var MemProfileRate int = 512 * 1024
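
A short, hedged sketch of the set-once-early guidance above; the chosen rate is illustrative.

package main

import "runtime"

func main() {
	// Change the rate only once and as early as possible, so profiling
	// tools see a single, consistent sampling rate.
	runtime.MemProfileRate = 64 * 1024 // sample ~1 allocation per 64 KiB

	// ... rest of the program ...
}
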
-
-// A MemProfileRecord describes the live objects allocated
-// by a particular call sequence (stack trace).
-type MemProfileRecord struct {
- AllocBytes, FreeBytes int64 // number of bytes allocated, freed
- AllocObjects, FreeObjects int64 // number of objects allocated, freed
- Stack0 [32]uintptr // stack trace for this record; ends at first 0 entry
-}
-
-// InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
-func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
-
-// InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
-func (r *MemProfileRecord) InUseObjects() int64 {
- return r.AllocObjects - r.FreeObjects
-}
-
-// Stack returns the stack trace associated with the record,
-// a prefix of r.Stack0.
-func (r *MemProfileRecord) Stack() []uintptr {
- for i, v := range r.Stack0 {
- if v == 0 {
- return r.Stack0[0:i]
- }
- }
- return r.Stack0[0:]
-}
-
-// MemProfile returns n, the number of records in the current memory profile.
-// If len(p) >= n, MemProfile copies the profile into p and returns n, true.
-// If len(p) < n, MemProfile does not change p and returns n, false.
-//
-// If inuseZero is true, the profile includes allocation records
-// where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
-// These are sites where memory was allocated, but it has all
-// been released back to the runtime.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.memprofile flag instead
-// of calling MemProfile directly.
-func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
- lock(&proflock)
- clear := true
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- if inuseZero || mp.alloc_bytes != mp.free_bytes {
- n++
- }
- if mp.allocs != 0 || mp.frees != 0 {
- clear = false
- }
- }
- if clear {
- // Absolutely no data, suggesting that a garbage collection
- // has not yet happened. In order to allow profiling when
- // garbage collection is disabled from the beginning of execution,
- // accumulate stats as if a GC just happened, and recount buckets.
- mprof_GC()
- mprof_GC()
- n = 0
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- if inuseZero || mp.alloc_bytes != mp.free_bytes {
- n++
- }
- }
- }
- if n <= len(p) {
- ok = true
- idx := 0
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- if inuseZero || mp.alloc_bytes != mp.free_bytes {
- record(&p[idx], b)
- idx++
- }
- }
- }
- unlock(&proflock)
- return
-}
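
The n/ok contract documented above implies the usual grow-and-retry idiom; a hedged sketch follows (the package name and the slack of 50 records are illustrative).

package profsketch

import "runtime"

// memProfileRecords follows the grow-and-retry idiom implied by the
// n/ok return values: size with a nil slice first, then copy, and
// retry if the profile grew in between.
func memProfileRecords() []runtime.MemProfileRecord {
	n, _ := runtime.MemProfile(nil, true)
	for {
		p := make([]runtime.MemProfileRecord, n+50)
		var ok bool
		n, ok = runtime.MemProfile(p, true)
		if ok {
			return p[:n]
		}
	}
}
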
-
-// Write b's data to r.
-func record(r *MemProfileRecord, b *bucket) {
- mp := b.mp()
- r.AllocBytes = int64(mp.alloc_bytes)
- r.FreeBytes = int64(mp.free_bytes)
- r.AllocObjects = int64(mp.allocs)
- r.FreeObjects = int64(mp.frees)
- copy(r.Stack0[:], b.stk())
- for i := int(b.nstk); i < len(r.Stack0); i++ {
- r.Stack0[i] = 0
- }
-}
-
-func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
- lock(&proflock)
- for b := mbuckets; b != nil; b = b.allnext {
- mp := b.mp()
- fn(b, uintptr(b.nstk), &b.stk()[0], b.size, mp.allocs, mp.frees)
- }
- unlock(&proflock)
-}
-
-// BlockProfileRecord describes blocking events originated
-// at a particular call sequence (stack trace).
-type BlockProfileRecord struct {
- Count int64
- Cycles int64
- StackRecord
-}
-
-// BlockProfile returns n, the number of records in the current blocking profile.
-// If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
-// If len(p) < n, BlockProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package or
-// the testing package's -test.blockprofile flag instead
-// of calling BlockProfile directly.
-func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
- lock(&proflock)
- for b := bbuckets; b != nil; b = b.allnext {
- n++
- }
- if n <= len(p) {
- ok = true
- for b := bbuckets; b != nil; b = b.allnext {
- bp := b.bp()
- r := &p[0]
- r.Count = int64(bp.count)
- r.Cycles = int64(bp.cycles)
- i := copy(r.Stack0[:], b.stk())
- for ; i < len(r.Stack0); i++ {
- r.Stack0[i] = 0
- }
- p = p[1:]
- }
- }
- unlock(&proflock)
- return
-}
-
-// ThreadCreateProfile returns n, the number of records in the thread creation profile.
-// If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
-// If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package instead
-// of calling ThreadCreateProfile directly.
-func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
- first := (*m)(atomicloadp(unsafe.Pointer(&allm)))
- for mp := first; mp != nil; mp = mp.alllink {
- n++
- }
- if n <= len(p) {
- ok = true
- i := 0
- for mp := first; mp != nil; mp = mp.alllink {
- for s := range mp.createstack {
- p[i].Stack0[s] = uintptr(mp.createstack[s])
- }
- i++
- }
- }
- return
-}
-
-var allgs []*g // proc.c
-
-// GoroutineProfile returns n, the number of records in the active goroutine stack profile.
-// If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
-// If len(p) < n, GoroutineProfile does not change p and returns n, false.
-//
-// Most clients should use the runtime/pprof package instead
-// of calling GoroutineProfile directly.
-func GoroutineProfile(p []StackRecord) (n int, ok bool) {
-
- n = NumGoroutine()
- if n <= len(p) {
- gp := getg()
- semacquire(&worldsema, false)
- gp.m.gcing = 1
- onM(stoptheworld)
-
- n = NumGoroutine()
- if n <= len(p) {
- ok = true
- r := p
- sp := getcallersp(unsafe.Pointer(&p))
- pc := getcallerpc(unsafe.Pointer(&p))
- onM(func() {
- saveg(pc, sp, gp, &r[0])
- })
- r = r[1:]
- for _, gp1 := range allgs {
- if gp1 == gp || readgstatus(gp1) == _Gdead {
- continue
- }
- saveg(^uintptr(0), ^uintptr(0), gp1, &r[0])
- r = r[1:]
- }
- }
-
- gp.m.gcing = 0
- semrelease(&worldsema)
- onM(starttheworld)
- }
-
- return n, ok
-}
-
-func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
- n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
- if n < len(r.Stack0) {
- r.Stack0[n] = 0
- }
-}
-
-// Stack formats a stack trace of the calling goroutine into buf
-// and returns the number of bytes written to buf.
-// If all is true, Stack formats stack traces of all other goroutines
-// into buf after the trace for the current goroutine.
-func Stack(buf []byte, all bool) int {
- if all {
- semacquire(&worldsema, false)
- gp := getg()
- gp.m.gcing = 1
- onM(stoptheworld)
- }
-
- n := 0
- if len(buf) > 0 {
- gp := getg()
- sp := getcallersp(unsafe.Pointer(&buf))
- pc := getcallerpc(unsafe.Pointer(&buf))
- onM(func() {
- g0 := getg()
- g0.writebuf = buf[0:0:len(buf)]
- goroutineheader(gp)
- traceback(pc, sp, 0, gp)
- if all {
- tracebackothers(gp)
- }
- n = len(g0.writebuf)
- g0.writebuf = nil
- })
- }
-
- if all {
- gp := getg()
- gp.m.gcing = 0
- semrelease(&worldsema)
- onM(starttheworld)
- }
- return n
-}
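
A minimal sketch of calling this entry point from user code; the buffer size is illustrative.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	buf := make([]byte, 1<<16)
	// Pass true to append the traces of all other goroutines
	// after the current goroutine's trace.
	n := runtime.Stack(buf, true)
	fmt.Printf("%s", buf[:n])
}
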
-
-// Tracing of alloc/free/gc.
-
-var tracelock mutex
-
-func tracealloc(p unsafe.Pointer, size uintptr, typ *_type) {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- if typ == nil {
- print("tracealloc(", p, ", ", hex(size), ")\n")
- } else {
- print("tracealloc(", p, ", ", hex(size), ", ", *typ._string, ")\n")
- }
- if gp.m.curg == nil || gp == gp.m.curg {
- goroutineheader(gp)
- pc := getcallerpc(unsafe.Pointer(&p))
- sp := getcallersp(unsafe.Pointer(&p))
- onM(func() {
- traceback(pc, sp, 0, gp)
- })
- } else {
- goroutineheader(gp.m.curg)
- traceback(^uintptr(0), ^uintptr(0), 0, gp.m.curg)
- }
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
-
-func tracefree(p unsafe.Pointer, size uintptr) {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- print("tracefree(", p, ", ", hex(size), ")\n")
- goroutineheader(gp)
- pc := getcallerpc(unsafe.Pointer(&p))
- sp := getcallersp(unsafe.Pointer(&p))
- onM(func() {
- traceback(pc, sp, 0, gp)
- })
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
-
-func tracegc() {
- lock(&tracelock)
- gp := getg()
- gp.m.traceback = 2
- print("tracegc()\n")
- // running on m->g0 stack; show all non-g0 goroutines
- tracebackothers(gp)
- print("end tracegc\n")
- print("\n")
- gp.m.traceback = 0
- unlock(&tracelock)
-}
diff --git a/libgo/go/runtime/netpoll.go b/libgo/go/runtime/netpoll.go
deleted file mode 100644
index 3456e02..0000000
--- a/libgo/go/runtime/netpoll.go
+++ /dev/null
@@ -1,455 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
-
-package runtime
-
-import "unsafe"
-
-// Integrated network poller (platform-independent part).
-// A particular implementation (epoll/kqueue) must define the following functions:
-// func netpollinit() // to initialize the poller
-// func netpollopen(fd uintptr, pd *pollDesc) int32 // to arm edge-triggered notifications
-// and associate fd with pd.
-// An implementation must call the following function to denote that the pd is ready.
-// func netpollready(gpp **g, pd *pollDesc, mode int32)
-
-// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
-// goroutines respectively. The semaphore can be in the following states:
-// pdReady - io readiness notification is pending;
-// a goroutine consumes the notification by changing the state to nil.
-// pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
-// the goroutine commits to park by changing the state to G pointer,
-// or, alternatively, concurrent io notification changes the state to READY,
-// or, alternatively, concurrent timeout/close changes the state to nil.
-// G pointer - the goroutine is blocked on the semaphore;
-// io notification or timeout/close changes the state to READY or nil respectively
-// and unparks the goroutine.
-// nil - nothing of the above.
-const (
- pdReady uintptr = 1
- pdWait uintptr = 2
-)
-
-const pollBlockSize = 4 * 1024
-
-// Network poller descriptor.
-type pollDesc struct {
- link *pollDesc // in pollcache, protected by pollcache.lock
-
- // The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
- // This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
- // pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
- // proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
- // in a lock-free way by all operations.
- // NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
- // that will blow up when GC starts moving objects.
-	lock    mutex // protects the following fields
- fd uintptr
- closing bool
- seq uintptr // protects from stale timers and ready notifications
- rg uintptr // pdReady, pdWait, G waiting for read or nil
- rt timer // read deadline timer (set if rt.f != nil)
- rd int64 // read deadline
- wg uintptr // pdReady, pdWait, G waiting for write or nil
- wt timer // write deadline timer
- wd int64 // write deadline
- user unsafe.Pointer // user settable cookie
-}
-
-type pollCache struct {
- lock mutex
- first *pollDesc
- // PollDesc objects must be type-stable,
- // because we can get ready notification from epoll/kqueue
- // after the descriptor is closed/reused.
- // Stale notifications are detected using seq variable,
- // seq is incremented when deadlines are changed or descriptor is reused.
-}
-
-var pollcache pollCache
-
-func netpollServerInit() {
- onM(netpollinit)
-}
-
-func netpollOpen(fd uintptr) (*pollDesc, int) {
- pd := pollcache.alloc()
- lock(&pd.lock)
- if pd.wg != 0 && pd.wg != pdReady {
- gothrow("netpollOpen: blocked write on free descriptor")
- }
- if pd.rg != 0 && pd.rg != pdReady {
- gothrow("netpollOpen: blocked read on free descriptor")
- }
- pd.fd = fd
- pd.closing = false
- pd.seq++
- pd.rg = 0
- pd.rd = 0
- pd.wg = 0
- pd.wd = 0
- unlock(&pd.lock)
-
- var errno int32
- onM(func() {
- errno = netpollopen(fd, pd)
- })
- return pd, int(errno)
-}
-
-func netpollClose(pd *pollDesc) {
- if !pd.closing {
- gothrow("netpollClose: close w/o unblock")
- }
- if pd.wg != 0 && pd.wg != pdReady {
- gothrow("netpollClose: blocked write on closing descriptor")
- }
- if pd.rg != 0 && pd.rg != pdReady {
- gothrow("netpollClose: blocked read on closing descriptor")
- }
- onM(func() {
- netpollclose(uintptr(pd.fd))
- })
- pollcache.free(pd)
-}
-
-func (c *pollCache) free(pd *pollDesc) {
- lock(&c.lock)
- pd.link = c.first
- c.first = pd
- unlock(&c.lock)
-}
-
-func netpollReset(pd *pollDesc, mode int) int {
- err := netpollcheckerr(pd, int32(mode))
- if err != 0 {
- return err
- }
- if mode == 'r' {
- pd.rg = 0
- } else if mode == 'w' {
- pd.wg = 0
- }
- return 0
-}
-
-func netpollWait(pd *pollDesc, mode int) int {
- err := netpollcheckerr(pd, int32(mode))
- if err != 0 {
- return err
- }
-	// As of now, only Solaris uses level-triggered IO.
- if GOOS == "solaris" {
- onM(func() {
- netpollarm(pd, mode)
- })
- }
- for !netpollblock(pd, int32(mode), false) {
- err = netpollcheckerr(pd, int32(mode))
- if err != 0 {
- return err
- }
- // Can happen if timeout has fired and unblocked us,
- // but before we had a chance to run, timeout has been reset.
- // Pretend it has not happened and retry.
- }
- return 0
-}
-
-func netpollWaitCanceled(pd *pollDesc, mode int) {
- // This function is used only on windows after a failed attempt to cancel
- // a pending async IO operation. Wait for ioready, ignore closing or timeouts.
- for !netpollblock(pd, int32(mode), true) {
- }
-}
-
-func netpollSetDeadline(pd *pollDesc, d int64, mode int) {
- lock(&pd.lock)
- if pd.closing {
- unlock(&pd.lock)
- return
- }
- pd.seq++ // invalidate current timers
- // Reset current timers.
- if pd.rt.f != nil {
- deltimer(&pd.rt)
- pd.rt.f = nil
- }
- if pd.wt.f != nil {
- deltimer(&pd.wt)
- pd.wt.f = nil
- }
- // Setup new timers.
- if d != 0 && d <= nanotime() {
- d = -1
- }
- if mode == 'r' || mode == 'r'+'w' {
- pd.rd = d
- }
- if mode == 'w' || mode == 'r'+'w' {
- pd.wd = d
- }
- if pd.rd > 0 && pd.rd == pd.wd {
- pd.rt.f = netpollDeadline
- pd.rt.when = pd.rd
-		// Copy current seq into the timer arg.
-		// The timer func will check the seq against the current descriptor seq;
-		// if they differ, the descriptor was reused or timers were reset.
- pd.rt.arg = pd
- pd.rt.seq = pd.seq
- addtimer(&pd.rt)
- } else {
- if pd.rd > 0 {
- pd.rt.f = netpollReadDeadline
- pd.rt.when = pd.rd
- pd.rt.arg = pd
- pd.rt.seq = pd.seq
- addtimer(&pd.rt)
- }
- if pd.wd > 0 {
- pd.wt.f = netpollWriteDeadline
- pd.wt.when = pd.wd
- pd.wt.arg = pd
- pd.wt.seq = pd.seq
- addtimer(&pd.wt)
- }
- }
- // If we set the new deadline in the past, unblock currently pending IO if any.
- var rg, wg *g
- atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
- if pd.rd < 0 {
- rg = netpollunblock(pd, 'r', false)
- }
- if pd.wd < 0 {
- wg = netpollunblock(pd, 'w', false)
- }
- unlock(&pd.lock)
- if rg != nil {
- goready(rg)
- }
- if wg != nil {
- goready(wg)
- }
-}
-
-func netpollUnblock(pd *pollDesc) {
- lock(&pd.lock)
- if pd.closing {
- gothrow("netpollUnblock: already closing")
- }
- pd.closing = true
- pd.seq++
- var rg, wg *g
- atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
- rg = netpollunblock(pd, 'r', false)
- wg = netpollunblock(pd, 'w', false)
- if pd.rt.f != nil {
- deltimer(&pd.rt)
- pd.rt.f = nil
- }
- if pd.wt.f != nil {
- deltimer(&pd.wt)
- pd.wt.f = nil
- }
- unlock(&pd.lock)
- if rg != nil {
- goready(rg)
- }
- if wg != nil {
- goready(wg)
- }
-}
-
-func netpollfd(pd *pollDesc) uintptr {
- return pd.fd
-}
-
-func netpolluser(pd *pollDesc) *unsafe.Pointer {
- return &pd.user
-}
-
-func netpollclosing(pd *pollDesc) bool {
- return pd.closing
-}
-
-func netpolllock(pd *pollDesc) {
- lock(&pd.lock)
-}
-
-func netpollunlock(pd *pollDesc) {
- unlock(&pd.lock)
-}
-
-// make pd ready; newly runnable goroutines (if any) are added to the list headed by *gpp
-func netpollready(gpp **g, pd *pollDesc, mode int32) {
- var rg, wg *g
- if mode == 'r' || mode == 'r'+'w' {
- rg = netpollunblock(pd, 'r', true)
- }
- if mode == 'w' || mode == 'r'+'w' {
- wg = netpollunblock(pd, 'w', true)
- }
- if rg != nil {
- rg.schedlink = *gpp
- *gpp = rg
- }
- if wg != nil {
- wg.schedlink = *gpp
- *gpp = wg
- }
-}
-
-func netpollcheckerr(pd *pollDesc, mode int32) int {
- if pd.closing {
- return 1 // errClosing
- }
- if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) {
- return 2 // errTimeout
- }
- return 0
-}
-
-func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
- return casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
-}
-
-// returns true if IO is ready, or false if timedout or closed
-// waitio - wait only for completed IO, ignore errors
-func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
- gpp := &pd.rg
- if mode == 'w' {
- gpp = &pd.wg
- }
-
- // set the gpp semaphore to WAIT
- for {
- old := *gpp
- if old == pdReady {
- *gpp = 0
- return true
- }
- if old != 0 {
- gothrow("netpollblock: double wait")
- }
- if casuintptr(gpp, 0, pdWait) {
- break
- }
- }
-
- // need to recheck error states after setting gpp to WAIT
- // this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
- // do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
- if waitio || netpollcheckerr(pd, mode) == 0 {
- f := netpollblockcommit
- gopark(**(**unsafe.Pointer)(unsafe.Pointer(&f)), unsafe.Pointer(gpp), "IO wait")
- }
- // be careful to not lose concurrent READY notification
- old := xchguintptr(gpp, 0)
- if old > pdWait {
- gothrow("netpollblock: corrupted state")
- }
- return old == pdReady
-}
-
-func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
- gpp := &pd.rg
- if mode == 'w' {
- gpp = &pd.wg
- }
-
- for {
- old := *gpp
- if old == pdReady {
- return nil
- }
- if old == 0 && !ioready {
- // Only set READY for ioready. runtime_pollWait
- // will check for timeout/cancel before waiting.
- return nil
- }
- var new uintptr
- if ioready {
- new = pdReady
- }
- if casuintptr(gpp, old, new) {
- if old == pdReady || old == pdWait {
- old = 0
- }
- return (*g)(unsafe.Pointer(old))
- }
- }
-}
-
-func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
- lock(&pd.lock)
- // Seq arg is seq when the timer was set.
- // If it's stale, ignore the timer event.
- if seq != pd.seq {
- // The descriptor was reused or timers were reset.
- unlock(&pd.lock)
- return
- }
- var rg *g
- if read {
- if pd.rd <= 0 || pd.rt.f == nil {
- gothrow("netpolldeadlineimpl: inconsistent read deadline")
- }
- pd.rd = -1
- atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
- rg = netpollunblock(pd, 'r', false)
- }
- var wg *g
- if write {
- if pd.wd <= 0 || pd.wt.f == nil && !read {
- gothrow("netpolldeadlineimpl: inconsistent write deadline")
- }
- pd.wd = -1
- atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
- wg = netpollunblock(pd, 'w', false)
- }
- unlock(&pd.lock)
- if rg != nil {
- goready(rg)
- }
- if wg != nil {
- goready(wg)
- }
-}
-
-func netpollDeadline(arg interface{}, seq uintptr) {
- netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
-}
-
-func netpollReadDeadline(arg interface{}, seq uintptr) {
- netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
-}
-
-func netpollWriteDeadline(arg interface{}, seq uintptr) {
- netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
-}
-
-func (c *pollCache) alloc() *pollDesc {
- lock(&c.lock)
- if c.first == nil {
- const pdSize = unsafe.Sizeof(pollDesc{})
- n := pollBlockSize / pdSize
- if n == 0 {
- n = 1
- }
-		// Must be in non-GC memory because it can be referenced
-		// only from epoll/kqueue internals.
- mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
- for i := uintptr(0); i < n; i++ {
- pd := (*pollDesc)(add(mem, i*pdSize))
- pd.link = c.first
- c.first = pd
- }
- }
- pd := c.first
- c.first = pd.link
- unlock(&c.lock)
- return pd
-}
diff --git a/libgo/go/runtime/netpoll_epoll.go b/libgo/go/runtime/netpoll_epoll.go
deleted file mode 100644
index ecfc9cd..0000000
--- a/libgo/go/runtime/netpoll_epoll.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux
-
-package runtime
-
-import "unsafe"
-
-func epollcreate(size int32) int32
-func epollcreate1(flags int32) int32
-
-//go:noescape
-func epollctl(epfd, op, fd int32, ev *epollevent) int32
-
-//go:noescape
-func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32
-func closeonexec(fd int32)
-
-var (
- epfd int32 = -1 // epoll descriptor
- netpolllasterr int32
-)
-
-func netpollinit() {
- epfd = epollcreate1(_EPOLL_CLOEXEC)
- if epfd >= 0 {
- return
- }
- epfd = epollcreate(1024)
- if epfd >= 0 {
- closeonexec(epfd)
- return
- }
- println("netpollinit: failed to create epoll descriptor", -epfd)
- gothrow("netpollinit: failed to create descriptor")
-}
-
-func netpollopen(fd uintptr, pd *pollDesc) int32 {
- var ev epollevent
- ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLET
- *(**pollDesc)(unsafe.Pointer(&ev.data)) = pd
- return -epollctl(epfd, _EPOLL_CTL_ADD, int32(fd), &ev)
-}
-
-func netpollclose(fd uintptr) int32 {
- var ev epollevent
- return -epollctl(epfd, _EPOLL_CTL_DEL, int32(fd), &ev)
-}
-
-func netpollarm(pd *pollDesc, mode int) {
- gothrow("unused")
-}
-
-// polls for ready network connections
-// returns list of goroutines that become runnable
-func netpoll(block bool) (gp *g) {
- if epfd == -1 {
- return
- }
- waitms := int32(-1)
- if !block {
- waitms = 0
- }
- var events [128]epollevent
-retry:
- n := epollwait(epfd, &events[0], int32(len(events)), waitms)
- if n < 0 {
- if n != -_EINTR && n != netpolllasterr {
- netpolllasterr = n
- println("runtime: epollwait on fd", epfd, "failed with", -n)
- }
- goto retry
- }
- for i := int32(0); i < n; i++ {
- ev := &events[i]
- if ev.events == 0 {
- continue
- }
- var mode int32
- if ev.events&(_EPOLLIN|_EPOLLRDHUP|_EPOLLHUP|_EPOLLERR) != 0 {
- mode += 'r'
- }
- if ev.events&(_EPOLLOUT|_EPOLLHUP|_EPOLLERR) != 0 {
- mode += 'w'
- }
- if mode != 0 {
- pd := *(**pollDesc)(unsafe.Pointer(&ev.data))
- netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode)
- }
- }
- if block && gp == nil {
- goto retry
- }
- return gp
-}
diff --git a/libgo/go/runtime/netpoll_kqueue.go b/libgo/go/runtime/netpoll_kqueue.go
deleted file mode 100644
index d6d55b9..0000000
--- a/libgo/go/runtime/netpoll_kqueue.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd netbsd openbsd
-
-package runtime
-
-// Integrated network poller (kqueue-based implementation).
-
-import "unsafe"
-
-func kqueue() int32
-
-//go:noescape
-func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
-func closeonexec(fd int32)
-
-var (
- kq int32 = -1
- netpolllasterr int32
-)
-
-func netpollinit() {
- kq = kqueue()
- if kq < 0 {
- println("netpollinit: kqueue failed with", -kq)
- gothrow("netpollinit: kqueue failed")
- }
- closeonexec(kq)
-}
-
-func netpollopen(fd uintptr, pd *pollDesc) int32 {
- // Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR)
- // for the whole fd lifetime. The notifications are automatically unregistered
- // when fd is closed.
- var ev [2]keventt
- *(*uintptr)(unsafe.Pointer(&ev[0].ident)) = fd
- ev[0].filter = _EVFILT_READ
- ev[0].flags = _EV_ADD | _EV_CLEAR
- ev[0].fflags = 0
- ev[0].data = 0
- ev[0].udata = (*byte)(unsafe.Pointer(pd))
- ev[1] = ev[0]
- ev[1].filter = _EVFILT_WRITE
- n := kevent(kq, &ev[0], 2, nil, 0, nil)
- if n < 0 {
- return -n
- }
- return 0
-}
-
-func netpollclose(fd uintptr) int32 {
- // Don't need to unregister because calling close()
- // on fd will remove any kevents that reference the descriptor.
- return 0
-}
-
-func netpollarm(pd *pollDesc, mode int) {
- gothrow("unused")
-}
-
-// Polls for ready network connections.
-// Returns list of goroutines that become runnable.
-func netpoll(block bool) (gp *g) {
- if kq == -1 {
- return
- }
- var tp *timespec
- var ts timespec
- if !block {
- tp = &ts
- }
- var events [64]keventt
-retry:
- n := kevent(kq, nil, 0, &events[0], int32(len(events)), tp)
- if n < 0 {
- if n != -_EINTR && n != netpolllasterr {
- netpolllasterr = n
- println("runtime: kevent on fd", kq, "failed with", -n)
- }
- goto retry
- }
- for i := 0; i < int(n); i++ {
- ev := &events[i]
- var mode int32
- if ev.filter == _EVFILT_READ {
- mode += 'r'
- }
- if ev.filter == _EVFILT_WRITE {
- mode += 'w'
- }
- if mode != 0 {
- netpollready((**g)(noescape(unsafe.Pointer(&gp))), (*pollDesc)(unsafe.Pointer(ev.udata)), mode)
- }
- }
- if block && gp == nil {
- goto retry
- }
- return gp
-}
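
The kqueue variant removed above does the equivalent with two persistent registrations per descriptor, EVFILT_READ and EVFILT_WRITE, both EV_ADD|EV_CLEAR so they stay armed edge-triggered for the descriptor's lifetime, with readiness decoded from the filter of each returned event. A hedged user-space sketch follows; it assumes golang.org/x/sys/unix and a BSD or macOS host and is illustrative only.

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	kq, err := unix.Kqueue()
	if err != nil {
		panic(err)
	}
	defer unix.Close(kq)

	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}

	// Mirror netpollopen: arm both filters once, for the whole fd lifetime.
	var changes [2]unix.Kevent_t
	unix.SetKevent(&changes[0], fds[0], unix.EVFILT_READ, unix.EV_ADD|unix.EV_CLEAR)
	unix.SetKevent(&changes[1], fds[0], unix.EVFILT_WRITE, unix.EV_ADD|unix.EV_CLEAR)
	if _, err := unix.Kevent(kq, changes[:], nil, nil); err != nil {
		panic(err)
	}

	// Mirror netpoll: block for events, then map filters to read/write readiness.
	events := make([]unix.Kevent_t, 64)
	n, err := unix.Kevent(kq, nil, events, nil)
	if err != nil {
		panic(err)
	}
	for _, ev := range events[:n] {
		switch ev.Filter {
		case unix.EVFILT_READ:
			fmt.Println("fd", ev.Ident, "readable")
		case unix.EVFILT_WRITE:
			fmt.Println("fd", ev.Ident, "writable")
		}
	}
}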
diff --git a/libgo/go/runtime/netpoll_nacl.go b/libgo/go/runtime/netpoll_nacl.go
deleted file mode 100644
index 5cbc300..0000000
--- a/libgo/go/runtime/netpoll_nacl.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Fake network poller for NaCl.
-// Should never be used, because NaCl network connections do not honor "SetNonblock".
-
-package runtime
-
-func netpollinit() {
-}
-
-func netpollopen(fd uintptr, pd *pollDesc) int32 {
- return 0
-}
-
-func netpollclose(fd uintptr) int32 {
- return 0
-}
-
-func netpollarm(pd *pollDesc, mode int) {
-}
-
-func netpoll(block bool) *g {
- return nil
-}
diff --git a/libgo/go/runtime/noasm_arm.go b/libgo/go/runtime/noasm_arm.go
deleted file mode 100644
index dd3ef82..0000000
--- a/libgo/go/runtime/noasm_arm.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Routines that are implemented in assembly in asm_{amd64,386}.s
-// but are implemented in Go for arm.
-
-package runtime
-
-func cmpstring(s1, s2 string) int {
- l := len(s1)
- if len(s2) < l {
- l = len(s2)
- }
- for i := 0; i < l; i++ {
- c1, c2 := s1[i], s2[i]
- if c1 < c2 {
- return -1
- }
- if c1 > c2 {
- return +1
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- if len(s1) > len(s2) {
- return +1
- }
- return 0
-}
-
-func cmpbytes(s1, s2 []byte) int {
- l := len(s1)
- if len(s2) < l {
- l = len(s2)
- }
- for i := 0; i < l; i++ {
- c1, c2 := s1[i], s2[i]
- if c1 < c2 {
- return -1
- }
- if c1 > c2 {
- return +1
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- if len(s1) > len(s2) {
- return +1
- }
- return 0
-}
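
cmpstring and cmpbytes above implement ordinary lexicographic comparison: the first differing byte decides, otherwise the shorter operand sorts first, with the usual -1/0/+1 result. Outside the runtime the standard library exposes the same ordering; a small sketch for reference:

package main

import (
	"bytes"
	"fmt"
	"strings"
)

func main() {
	// Same contract as cmpstring/cmpbytes above: first differing byte decides,
	// otherwise the shorter operand sorts first; result is -1, 0 or 1.
	fmt.Println(strings.Compare("abc", "abd"))              // -1
	fmt.Println(bytes.Compare([]byte("abc"), []byte("ab"))) // 1
	fmt.Println(strings.Compare("go", "go"))                // 0
}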
diff --git a/libgo/go/runtime/norace_test.go b/libgo/go/runtime/norace_test.go
index 3b17187..3681bf1 100644
--- a/libgo/go/runtime/norace_test.go
+++ b/libgo/go/runtime/norace_test.go
@@ -34,12 +34,12 @@ func benchmarkSyscall(b *testing.B, work, excess int) {
b.RunParallel(func(pb *testing.PB) {
foo := 42
for pb.Next() {
- runtime.Entersyscall()
+ runtime.Entersyscall(0)
for i := 0; i < work; i++ {
foo *= 2
foo /= 2
}
- runtime.Exitsyscall()
+ runtime.Exitsyscall(0)
}
_ = foo
})
diff --git a/libgo/go/runtime/os_darwin.go b/libgo/go/runtime/os_darwin.go
deleted file mode 100644
index 4327ced..0000000
--- a/libgo/go/runtime/os_darwin.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func bsdthread_create(stk, mm, gg, fn unsafe.Pointer) int32
-func bsdthread_register() int32
-func mach_msg_trap(h unsafe.Pointer, op int32, send_size, rcv_size, rcv_name, timeout, notify uint32) int32
-func mach_reply_port() uint32
-func mach_task_self() uint32
-func mach_thread_self() uint32
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func sigprocmask(sig int32, new, old unsafe.Pointer)
-func sigaction(mode uint32, new, old unsafe.Pointer)
-func sigaltstack(new, old unsafe.Pointer)
-func sigtramp()
-func setitimer(mode int32, new, old unsafe.Pointer)
-func mach_semaphore_wait(sema uint32) int32
-func mach_semaphore_timedwait(sema, sec, nsec uint32) int32
-func mach_semaphore_signal(sema uint32) int32
-func mach_semaphore_signal_all(sema uint32) int32
diff --git a/libgo/go/runtime/os_dragonfly.go b/libgo/go/runtime/os_dragonfly.go
deleted file mode 100644
index cdaa069..0000000
--- a/libgo/go/runtime/os_dragonfly.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func lwp_create(param unsafe.Pointer) int32
-func sigaltstack(new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigprocmask(new, old unsafe.Pointer)
-func setitimer(mode int32, new, old unsafe.Pointer)
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func getrlimit(kind int32, limit unsafe.Pointer) int32
-func raise(sig int32)
-func sys_umtx_sleep(addr unsafe.Pointer, val, timeout int32) int32
-func sys_umtx_wakeup(addr unsafe.Pointer, val int32) int32
-
-const stackSystem = 0
diff --git a/libgo/go/runtime/os_freebsd.go b/libgo/go/runtime/os_freebsd.go
deleted file mode 100644
index 5970804..0000000
--- a/libgo/go/runtime/os_freebsd.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func thr_new(param unsafe.Pointer, size int32)
-func sigaltstack(new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigprocmask(new, old unsafe.Pointer)
-func setitimer(mode int32, new, old unsafe.Pointer)
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func getrlimit(kind int32, limit unsafe.Pointer) int32
-func raise(sig int32)
-func sys_umtx_op(addr unsafe.Pointer, mode int32, val uint32, ptr2, ts unsafe.Pointer) int32
diff --git a/libgo/go/runtime/os_linux.go b/libgo/go/runtime/os_linux.go
deleted file mode 100644
index 41123ad..0000000
--- a/libgo/go/runtime/os_linux.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func futex(addr unsafe.Pointer, op int32, val uint32, ts, addr2 unsafe.Pointer, val3 uint32) int32
-func clone(flags int32, stk, mm, gg, fn unsafe.Pointer) int32
-func rt_sigaction(sig uintptr, new, old unsafe.Pointer, size uintptr) int32
-func sigaltstack(new, old unsafe.Pointer)
-func setitimer(mode int32, new, old unsafe.Pointer)
-func rtsigprocmask(sig int32, new, old unsafe.Pointer, size int32)
-func getrlimit(kind int32, limit unsafe.Pointer) int32
-func raise(sig int32)
-func sched_getaffinity(pid, len uintptr, buf *uintptr) int32
diff --git a/libgo/go/runtime/os_nacl.go b/libgo/go/runtime/os_nacl.go
deleted file mode 100644
index 8dd43ff..0000000
--- a/libgo/go/runtime/os_nacl.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func nacl_exception_stack(p unsafe.Pointer, size int32) int32
-func nacl_exception_handler(fn, arg unsafe.Pointer) int32
-func nacl_sem_create(flag int32) int32
-func nacl_sem_wait(sem int32) int32
-func nacl_sem_post(sem int32) int32
-func nacl_mutex_create(flag int32) int32
-func nacl_mutex_lock(mutex int32) int32
-func nacl_mutex_trylock(mutex int32) int32
-func nacl_mutex_unlock(mutex int32) int32
-func nacl_cond_create(flag int32) int32
-func nacl_cond_wait(cond, n int32) int32
-func nacl_cond_signal(cond int32) int32
-func nacl_cond_broadcast(cond int32) int32
-func nacl_cond_timed_wait_abs(cond, lock int32, ts unsafe.Pointer) int32
-func nacl_thread_create(fn, stk, tls, xx unsafe.Pointer) int32
-func nacl_nanosleep(ts, extra unsafe.Pointer) int32
-
-func os_sigpipe() {
- gothrow("too many writes on closed pipe")
-}
-
-func sigpanic() {
- g := getg()
- if !canpanic(g) {
- gothrow("unexpected signal during runtime execution")
- }
-
- // Native Client only invokes the exception handler for memory faults.
- g.sig = _SIGSEGV
- panicmem()
-}
diff --git a/libgo/go/runtime/os_netbsd.go b/libgo/go/runtime/os_netbsd.go
deleted file mode 100644
index f000c5e..0000000
--- a/libgo/go/runtime/os_netbsd.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func setitimer(mode int32, new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigaltstack(new, old unsafe.Pointer)
-func sigprocmask(mode int32, new, old unsafe.Pointer)
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func lwp_tramp()
-func raise(sig int32)
-func getcontext(ctxt unsafe.Pointer)
-func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32
-func lwp_park(abstime unsafe.Pointer, unpark int32, hint, unparkhint unsafe.Pointer) int32
-func lwp_unpark(lwp int32, hint unsafe.Pointer) int32
-func lwp_self() int32
diff --git a/libgo/go/runtime/os_openbsd.go b/libgo/go/runtime/os_openbsd.go
deleted file mode 100644
index a000f96..0000000
--- a/libgo/go/runtime/os_openbsd.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func setitimer(mode int32, new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigaltstack(new, old unsafe.Pointer)
-func sigprocmask(mode int32, new uint32) uint32
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func raise(sig int32)
-func tfork(param unsafe.Pointer, psize uintptr, mm, gg, fn unsafe.Pointer) int32
-func thrsleep(ident unsafe.Pointer, clock_id int32, tsp, lock, abort unsafe.Pointer) int32
-func thrwakeup(ident unsafe.Pointer, n int32) int32
diff --git a/libgo/go/runtime/os_plan9.go b/libgo/go/runtime/os_plan9.go
deleted file mode 100644
index 10e5531..0000000
--- a/libgo/go/runtime/os_plan9.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-const _SIGPROF = 0 // dummy value for badsignal
-
-func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32
-func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32
-func seek(fd int32, offset int64, whence int32) int64
-func exits(msg *byte)
-func brk_(addr unsafe.Pointer) uintptr
-func sleep(ms int32) int32
-func rfork(flags int32) int32
-func plan9_semacquire(addr *uint32, block int32) int32
-func plan9_tsemacquire(addr *uint32, ms int32) int32
-func plan9_semrelease(addr *uint32, count int32) int32
-func notify(fn unsafe.Pointer) int32
-func noted(mode int32) int32
-func nsec(*int64) int64
-func sigtramp(ureg, msg unsafe.Pointer)
-func setfpmasks()
-func tstart_plan9(newm *m)
-func errstr() string
-
-type _Plink uintptr
-
-func os_sigpipe() {
- gothrow("too many writes on closed pipe")
-}
-
-func sigpanic() {
- g := getg()
- if !canpanic(g) {
- gothrow("unexpected signal during runtime execution")
- }
-
- note := gostringnocopy((*byte)(unsafe.Pointer(g.m.notesig)))
- switch g.sig {
- case _SIGRFAULT, _SIGWFAULT:
- addr := note[index(note, "addr=")+5:]
- g.sigcode1 = uintptr(atolwhex(addr))
- if g.sigcode1 < 0x1000 || g.paniconfault {
- panicmem()
- }
- print("unexpected fault address ", hex(g.sigcode1), "\n")
- gothrow("fault")
- case _SIGTRAP:
- if g.paniconfault {
- panicmem()
- }
- gothrow(note)
- case _SIGINTDIV:
- panicdivide()
- case _SIGFLOAT:
- panicfloat()
- default:
- panic(errorString(note))
- }
-}
-
-func atolwhex(p string) int64 {
- for hasprefix(p, " ") || hasprefix(p, "\t") {
- p = p[1:]
- }
- neg := false
- if hasprefix(p, "-") || hasprefix(p, "+") {
- neg = p[0] == '-'
- p = p[1:]
- for hasprefix(p, " ") || hasprefix(p, "\t") {
- p = p[1:]
- }
- }
- var n int64
- switch {
- case hasprefix(p, "0x"), hasprefix(p, "0X"):
- p = p[2:]
- for ; len(p) > 0; p = p[1:] {
- if '0' <= p[0] && p[0] <= '9' {
- n = n*16 + int64(p[0]-'0')
- } else if 'a' <= p[0] && p[0] <= 'f' {
- n = n*16 + int64(p[0]-'a'+10)
- } else if 'A' <= p[0] && p[0] <= 'F' {
- n = n*16 + int64(p[0]-'A'+10)
- } else {
- break
- }
- }
- case hasprefix(p, "0"):
- for ; len(p) > 0 && '0' <= p[0] && p[0] <= '7'; p = p[1:] {
- n = n*8 + int64(p[0]-'0')
- }
- default:
- for ; len(p) > 0 && '0' <= p[0] && p[0] <= '9'; p = p[1:] {
- n = n*10 + int64(p[0]-'0')
- }
- }
- if neg {
- n = -n
- }
- return n
-}
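
atolwhex above is a freestanding parser for the fault address embedded in a Plan 9 note: optional blanks and sign, then 0x/0X hex, leading-zero octal, or decimal. In ordinary Go code strconv.ParseInt with base 0 recognizes the same prefixes, though it does not skip leading blanks, so trim those first. A small comparison sketch, not runtime code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Base 0 makes ParseInt honor the same prefixes atolwhex handles:
	// optional sign, 0x/0X hex, leading-0 octal, otherwise decimal.
	for _, s := range []string{"  0x7f", "-0755", "+42"} {
		n, err := strconv.ParseInt(strings.TrimSpace(s), 0, 64)
		fmt.Println(strings.TrimSpace(s), "->", n, err)
	}
}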
diff --git a/libgo/go/runtime/os_solaris.go b/libgo/go/runtime/os_solaris.go
deleted file mode 100644
index ca13151..0000000
--- a/libgo/go/runtime/os_solaris.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func setitimer(mode int32, new, old unsafe.Pointer)
-func sigaction(sig int32, new, old unsafe.Pointer)
-func sigaltstack(new, old unsafe.Pointer)
-func sigprocmask(mode int32, new, old unsafe.Pointer)
-func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
-func getrlimit(kind int32, limit unsafe.Pointer)
-func miniterrno(fn unsafe.Pointer)
-func raise(sig int32)
-func getcontext(ctxt unsafe.Pointer)
-func tstart_sysvicall(mm unsafe.Pointer) uint32
-func nanotime1() int64
-func usleep1(usec uint32)
-func osyield1()
-func netpollinit()
-func netpollopen(fd uintptr, pd *pollDesc) int32
-func netpollclose(fd uintptr) int32
-func netpollarm(pd *pollDesc, mode int)
-
-type libcFunc byte
-
-var asmsysvicall6 libcFunc
-
-//go:nosplit
-func sysvicall0(fn *libcFunc) uintptr {
- libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
- libcall.n = 0
- // TODO(rsc): Why is noescape necessary here and below?
- libcall.args = uintptr(noescape(unsafe.Pointer(&fn))) // it's unused but must be non-nil, otherwise crashes
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
- return libcall.r1
-}
-
-//go:nosplit
-func sysvicall1(fn *libcFunc, a1 uintptr) uintptr {
- libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
- libcall.n = 1
- libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
- return libcall.r1
-}
-
-//go:nosplit
-func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr {
- libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
- libcall.n = 2
- libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
- return libcall.r1
-}
-
-//go:nosplit
-func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr {
- libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
- libcall.n = 3
- libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
- return libcall.r1
-}
-
-//go:nosplit
-func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
- libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
- libcall.n = 4
- libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
- return libcall.r1
-}
-
-//go:nosplit
-func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
- libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
- libcall.n = 5
- libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
- return libcall.r1
-}
-
-//go:nosplit
-func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
- libcall := &getg().m.libcall
- libcall.fn = uintptr(unsafe.Pointer(fn))
- libcall.n = 6
- libcall.args = uintptr(noescape(unsafe.Pointer(&a1)))
- asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(libcall))
- return libcall.r1
-}
diff --git a/libgo/go/runtime/os_windows.go b/libgo/go/runtime/os_windows.go
deleted file mode 100644
index 1528d2f..0000000
--- a/libgo/go/runtime/os_windows.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-type stdFunction *byte
-
-func stdcall0(fn stdFunction) uintptr
-func stdcall1(fn stdFunction, a0 uintptr) uintptr
-func stdcall2(fn stdFunction, a0, a1 uintptr) uintptr
-func stdcall3(fn stdFunction, a0, a1, a2 uintptr) uintptr
-func stdcall4(fn stdFunction, a0, a1, a2, a3 uintptr) uintptr
-func stdcall5(fn stdFunction, a0, a1, a2, a3, a4 uintptr) uintptr
-func stdcall6(fn stdFunction, a0, a1, a2, a3, a4, a5 uintptr) uintptr
-func stdcall7(fn stdFunction, a0, a1, a2, a3, a4, a5, a6 uintptr) uintptr
-
-func asmstdcall(fn unsafe.Pointer)
-func getlasterror() uint32
-func setlasterror(err uint32)
-func usleep1(usec uint32)
-func netpollinit()
-func netpollopen(fd uintptr, pd *pollDesc) int32
-func netpollclose(fd uintptr) int32
-func netpollarm(pd *pollDesc, mode int)
-
-func os_sigpipe() {
- gothrow("too many writes on closed pipe")
-}
-
-func sigpanic() {
- g := getg()
- if !canpanic(g) {
- gothrow("unexpected signal during runtime execution")
- }
-
- switch uint32(g.sig) {
- case _EXCEPTION_ACCESS_VIOLATION:
- if g.sigcode1 < 0x1000 || g.paniconfault {
- panicmem()
- }
- print("unexpected fault address ", hex(g.sigcode1), "\n")
- gothrow("fault")
- case _EXCEPTION_INT_DIVIDE_BY_ZERO:
- panicdivide()
- case _EXCEPTION_INT_OVERFLOW:
- panicoverflow()
- case _EXCEPTION_FLT_DENORMAL_OPERAND,
- _EXCEPTION_FLT_DIVIDE_BY_ZERO,
- _EXCEPTION_FLT_INEXACT_RESULT,
- _EXCEPTION_FLT_OVERFLOW,
- _EXCEPTION_FLT_UNDERFLOW:
- panicfloat()
- }
- gothrow("fault")
-}
diff --git a/libgo/go/runtime/os_windows_386.go b/libgo/go/runtime/os_windows_386.go
deleted file mode 100644
index 86a1906..0000000
--- a/libgo/go/runtime/os_windows_386.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// contextPC returns the EIP (program counter) register from the context.
-func contextPC(r *context) uintptr { return uintptr(r.eip) }
-
-// contextSP returns the ESP (stack pointer) register from the context.
-func contextSP(r *context) uintptr { return uintptr(r.esp) }
diff --git a/libgo/go/runtime/os_windows_amd64.go b/libgo/go/runtime/os_windows_amd64.go
deleted file mode 100644
index 3f4d4d0..0000000
--- a/libgo/go/runtime/os_windows_amd64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// contextPC returns the RIP (program counter) register from the context.
-func contextPC(r *context) uintptr { return uintptr(r.rip) }
-
-// contextSP returns the RSP (stack pointer) register from the context.
-func contextSP(r *context) uintptr { return uintptr(r.rsp) }
diff --git a/libgo/go/runtime/panic.go b/libgo/go/runtime/panic.go
deleted file mode 100644
index 685ff5c..0000000
--- a/libgo/go/runtime/panic.go
+++ /dev/null
@@ -1,505 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-var indexError = error(errorString("index out of range"))
-
-func panicindex() {
- panic(indexError)
-}
-
-var sliceError = error(errorString("slice bounds out of range"))
-
-func panicslice() {
- panic(sliceError)
-}
-
-var divideError = error(errorString("integer divide by zero"))
-
-func panicdivide() {
- panic(divideError)
-}
-
-var overflowError = error(errorString("integer overflow"))
-
-func panicoverflow() {
- panic(overflowError)
-}
-
-var floatError = error(errorString("floating point error"))
-
-func panicfloat() {
- panic(floatError)
-}
-
-var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
-
-func panicmem() {
- panic(memoryError)
-}
-
-func throwreturn() {
- gothrow("no return at end of a typed function - compiler is broken")
-}
-
-func throwinit() {
- gothrow("recursive call during initialization - linker skew")
-}
-
-// Create a new deferred function fn with siz bytes of arguments.
-// The compiler turns a defer statement into a call to this.
-//go:nosplit
-func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
- // the arguments of fn are in a perilous state. The stack map
- // for deferproc does not describe them. So we can't let garbage
- // collection or stack copying trigger until we've copied them out
- // to somewhere safe. deferproc_m does that. Until deferproc_m,
- // we can only call nosplit routines.
- argp := uintptr(unsafe.Pointer(&fn))
- argp += unsafe.Sizeof(fn)
- if GOARCH == "arm" {
- argp += ptrSize // skip caller's saved link register
- }
- mp := acquirem()
- mp.scalararg[0] = uintptr(siz)
- mp.ptrarg[0] = unsafe.Pointer(fn)
- mp.scalararg[1] = argp
- mp.scalararg[2] = getcallerpc(unsafe.Pointer(&siz))
-
- if mp.curg != getg() {
- // go code on the m stack can't defer
- gothrow("defer on m")
- }
-
- onM(deferproc_m)
-
- releasem(mp)
-
- // deferproc returns 0 normally.
- // a deferred func that stops a panic
- // makes the deferproc return 1.
- // the code the compiler generates always
- // checks the return value and jumps to the
- // end of the function if deferproc returns != 0.
- return0()
- // No code can go here - the C return register has
- // been set and must not be clobbered.
-}
-
-// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
-// Each P holds a pool for defers with small arg sizes.
-// Assign defer allocations to pools by rounding to 16, to match malloc size classes.
-
-const (
- deferHeaderSize = unsafe.Sizeof(_defer{})
- minDeferAlloc = (deferHeaderSize + 15) &^ 15
- minDeferArgs = minDeferAlloc - deferHeaderSize
-)
-
-// defer size class for arg size sz
-//go:nosplit
-func deferclass(siz uintptr) uintptr {
- if siz <= minDeferArgs {
- return 0
- }
- return (siz - minDeferArgs + 15) / 16
-}
-
-// total size of memory block for defer with arg size sz
-func totaldefersize(siz uintptr) uintptr {
- if siz <= minDeferArgs {
- return minDeferAlloc
- }
- return deferHeaderSize + siz
-}
-
-// Ensure that defer arg sizes that map to the same defer size class
-// also map to the same malloc size class.
-func testdefersizes() {
- var m [len(p{}.deferpool)]int32
-
- for i := range m {
- m[i] = -1
- }
- for i := uintptr(0); ; i++ {
- defersc := deferclass(i)
- if defersc >= uintptr(len(m)) {
- break
- }
- siz := goroundupsize(totaldefersize(i))
- if m[defersc] < 0 {
- m[defersc] = int32(siz)
- continue
- }
- if m[defersc] != int32(siz) {
- print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
- gothrow("bad defer size class")
- }
- }
-}
-
-// The arguments associated with a deferred call are stored
-// immediately after the _defer header in memory.
-//go:nosplit
-func deferArgs(d *_defer) unsafe.Pointer {
- return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
-}
-
-var deferType *_type // type of _defer struct
-
-func init() {
- var x interface{}
- x = (*_defer)(nil)
- deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
-}
-
-// Allocate a Defer, usually using per-P pool.
-// Each defer must be released with freedefer.
-// Note: runs on M stack
-func newdefer(siz int32) *_defer {
- var d *_defer
- sc := deferclass(uintptr(siz))
- mp := acquirem()
- if sc < uintptr(len(p{}.deferpool)) {
- pp := mp.p
- d = pp.deferpool[sc]
- if d != nil {
- pp.deferpool[sc] = d.link
- }
- }
- if d == nil {
- // Allocate new defer+args.
- total := goroundupsize(totaldefersize(uintptr(siz)))
- d = (*_defer)(mallocgc(total, deferType, 0))
- }
- d.siz = siz
- gp := mp.curg
- d.link = gp._defer
- gp._defer = d
- releasem(mp)
- return d
-}
-
-// Free the given defer.
-// The defer cannot be used after this call.
-//go:nosplit
-func freedefer(d *_defer) {
- if d._panic != nil {
- freedeferpanic()
- }
- if d.fn != nil {
- freedeferfn()
- }
- sc := deferclass(uintptr(d.siz))
- if sc < uintptr(len(p{}.deferpool)) {
- mp := acquirem()
- pp := mp.p
- *d = _defer{}
- d.link = pp.deferpool[sc]
- pp.deferpool[sc] = d
- releasem(mp)
- }
-}
-
-// Separate function so that it can split stack.
-// Windows otherwise runs out of stack space.
-func freedeferpanic() {
- // _panic must be cleared before d is unlinked from gp.
- gothrow("freedefer with d._panic != nil")
-}
-
-func freedeferfn() {
- // fn must be cleared before d is unlinked from gp.
- gothrow("freedefer with d.fn != nil")
-}
-
-// Run a deferred function if there is one.
-// The compiler inserts a call to this at the end of any
-// function which calls defer.
-// If there is a deferred function, this will call runtime·jmpdefer,
-// which will jump to the deferred function such that it appears
-// to have been called by the caller of deferreturn at the point
-// just before deferreturn was called. The effect is that deferreturn
-// is called again and again until there are no more deferred functions.
-// Cannot split the stack because we reuse the caller's frame to
-// call the deferred function.
-
-// The single argument isn't actually used - it just has its address
-// taken so it can be matched against pending defers.
-//go:nosplit
-func deferreturn(arg0 uintptr) {
- gp := getg()
- d := gp._defer
- if d == nil {
- return
- }
- argp := uintptr(unsafe.Pointer(&arg0))
- if d.argp != argp {
- return
- }
-
- // Moving arguments around.
- // Do not allow preemption here, because the garbage collector
- // won't know the form of the arguments until the jmpdefer can
- // flip the PC over to fn.
- mp := acquirem()
- memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz))
- fn := d.fn
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- releasem(mp)
- jmpdefer(fn, argp)
-}
-
-// Goexit terminates the goroutine that calls it. No other goroutine is affected.
-// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
-// is not a panic, however, any recover calls in those deferred functions will return nil.
-//
-// Calling Goexit from the main goroutine terminates that goroutine
-// without func main returning. Since func main has not returned,
-// the program continues execution of other goroutines.
-// If all other goroutines exit, the program crashes.
-func Goexit() {
- // Run all deferred functions for the current goroutine.
- // This code is similar to gopanic, see that implementation
- // for detailed comments.
- gp := getg()
- for {
- d := gp._defer
- if d == nil {
- break
- }
- if d.started {
- if d._panic != nil {
- d._panic.aborted = true
- d._panic = nil
- }
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- continue
- }
- d.started = true
- reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
- if gp._defer != d {
- gothrow("bad defer entry in Goexit")
- }
- d._panic = nil
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- // Note: we ignore recovers here because Goexit isn't a panic
- }
- goexit()
-}
-
-func canpanic(*g) bool
-
-// Print all currently active panics. Used when crashing.
-func printpanics(p *_panic) {
- if p.link != nil {
- printpanics(p.link)
- print("\t")
- }
- print("panic: ")
- printany(p.arg)
- if p.recovered {
- print(" [recovered]")
- }
- print("\n")
-}
-
-// The implementation of the predeclared function panic.
-func gopanic(e interface{}) {
- gp := getg()
- if gp.m.curg != gp {
- gothrow("panic on m stack")
- }
-
- // m.softfloat is set during software floating point.
- // It increments m.locks to avoid preemption.
- // We moved the memory loads out, so there shouldn't be
- // any reason for it to panic anymore.
- if gp.m.softfloat != 0 {
- gp.m.locks--
- gp.m.softfloat = 0
- gothrow("panic during softfloat")
- }
- if gp.m.mallocing != 0 {
- print("panic: ")
- printany(e)
- print("\n")
- gothrow("panic during malloc")
- }
- if gp.m.gcing != 0 {
- print("panic: ")
- printany(e)
- print("\n")
- gothrow("panic during gc")
- }
- if gp.m.locks != 0 {
- print("panic: ")
- printany(e)
- print("\n")
- gothrow("panic holding locks")
- }
-
- var p _panic
- p.arg = e
- p.link = gp._panic
- gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
-
- for {
- d := gp._defer
- if d == nil {
- break
- }
-
- // If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
- // take defer off list. The earlier panic or Goexit will not continue running.
- if d.started {
- if d._panic != nil {
- d._panic.aborted = true
- }
- d._panic = nil
- d.fn = nil
- gp._defer = d.link
- freedefer(d)
- continue
- }
-
- // Mark defer as started, but keep on list, so that traceback
- // can find and update the defer's argument frame if stack growth
-		// or a garbage collection happens before reflectcall starts executing d.fn.
- d.started = true
-
- // Record the panic that is running the defer.
- // If there is a new panic during the deferred call, that panic
- // will find d in the list and will mark d._panic (this panic) aborted.
- d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))
-
- p.argp = unsafe.Pointer(getargp(0))
- reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
- p.argp = nil
-
- // reflectcall did not panic. Remove d.
- if gp._defer != d {
- gothrow("bad defer entry in panic")
- }
- d._panic = nil
- d.fn = nil
- gp._defer = d.link
-
- // trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
- //GC()
-
- pc := d.pc
- argp := unsafe.Pointer(d.argp) // must be pointer so it gets adjusted during stack copy
- freedefer(d)
- if p.recovered {
- gp._panic = p.link
- // Aborted panics are marked but remain on the g.panic list.
- // Remove them from the list.
- for gp._panic != nil && gp._panic.aborted {
- gp._panic = gp._panic.link
- }
- if gp._panic == nil { // must be done with signal
- gp.sig = 0
- }
- // Pass information about recovering frame to recovery.
- gp.sigcode0 = uintptr(argp)
- gp.sigcode1 = pc
- mcall(recovery_m)
- gothrow("recovery failed") // mcall should not return
- }
- }
-
- // ran out of deferred calls - old-school panic now
- startpanic()
- printpanics(gp._panic)
- dopanic(0) // should not return
- *(*int)(nil) = 0 // not reached
-}
-
-// getargp returns the location where the caller
-// writes outgoing function call arguments.
-//go:nosplit
-func getargp(x int) uintptr {
- // x is an argument mainly so that we can return its address.
- // However, we need to make the function complex enough
- // that it won't be inlined. We always pass x = 0, so this code
- // does nothing other than keep the compiler from thinking
- // the function is simple enough to inline.
- if x > 0 {
- return getcallersp(unsafe.Pointer(&x)) * 0
- }
- return uintptr(noescape(unsafe.Pointer(&x)))
-}
-
-// The implementation of the predeclared function recover.
-// Cannot split the stack because it needs to reliably
-// find the stack segment of its caller.
-//
-// TODO(rsc): Once we commit to CopyStackAlways,
-// this doesn't need to be nosplit.
-//go:nosplit
-func gorecover(argp uintptr) interface{} {
- // Must be in a function running as part of a deferred call during the panic.
- // Must be called from the topmost function of the call
- // (the function used in the defer statement).
- // p.argp is the argument pointer of that topmost deferred function call.
- // Compare against argp reported by caller.
- // If they match, the caller is the one who can recover.
- gp := getg()
- p := gp._panic
- if p != nil && !p.recovered && argp == uintptr(p.argp) {
- p.recovered = true
- return p.arg
- }
- return nil
-}
-
-//go:nosplit
-func startpanic() {
- onM_signalok(startpanic_m)
-}
-
-//go:nosplit
-func dopanic(unused int) {
- gp := getg()
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(gp)
- mp.scalararg[0] = getcallerpc((unsafe.Pointer)(&unused))
- mp.scalararg[1] = getcallersp((unsafe.Pointer)(&unused))
- onM_signalok(dopanic_m) // should never return
- *(*int)(nil) = 0
-}
-
-//go:nosplit
-func throw(s *byte) {
- gp := getg()
- if gp.m.throwing == 0 {
- gp.m.throwing = 1
- }
- startpanic()
- print("fatal error: ", gostringnocopy(s), "\n")
- dopanic(0)
- *(*int)(nil) = 0 // not reached
-}
-
-//go:nosplit
-func gothrow(s string) {
- gp := getg()
- if gp.m.throwing == 0 {
- gp.m.throwing = 1
- }
- startpanic()
- print("fatal error: ", s, "\n")
- dopanic(0)
- *(*int)(nil) = 0 // not reached
-}
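
The defer pooling removed above buckets defers by argument size so that each per-P pool maps onto a single malloc size class: the _defer header is rounded up to a multiple of 16, small argument sizes ride in the slack, and every further 16 bytes of arguments moves the defer one class up. The sketch below re-derives that arithmetic with an assumed 40-byte header (the real value is unsafe.Sizeof(_defer{}) and depends on the build), so only the rounding pattern, not the numbers, carries over.

package main

import "fmt"

const (
	deferHeaderSize = 40                           // assumed for illustration only
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15 // header rounded up to a 16-byte boundary (48 here)
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// deferclass mirrors the bucketing above: argument sizes that fit in the
// slack of the rounded header share class 0; after that, one class per 16 bytes.
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// totaldefersize is the block size before malloc size-class rounding.
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

func main() {
	for _, siz := range []uintptr{0, 8, 9, 24, 25, 40} {
		fmt.Printf("args=%2d  class=%d  block=%d bytes (before malloc rounding)\n",
			siz, deferclass(siz), totaldefersize(siz))
	}
}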
diff --git a/libgo/go/runtime/parfor_test.go b/libgo/go/runtime/parfor_test.go
index de64285..5d22aec 100644
--- a/libgo/go/runtime/parfor_test.go
+++ b/libgo/go/runtime/parfor_test.go
@@ -10,11 +10,8 @@ package runtime_test
import (
. "runtime"
"testing"
- "unsafe"
)
-var gdata []uint64
-
// Simple serial sanity test for parallelfor.
func TestParFor(t *testing.T) {
const P = 1
@@ -24,12 +21,7 @@ func TestParFor(t *testing.T) {
data[i] = i
}
desc := NewParFor(P)
- // Avoid making func a closure: parfor cannot invoke them.
- // Since it doesn't happen in the C code, it's not worth doing
- // just for the test.
- gdata = data
- ParForSetup(desc, P, N, nil, true, func(desc *ParFor, i uint32) {
- data := gdata
+ ParForSetup(desc, P, N, true, func(desc *ParFor, i uint32) {
data[i] = data[i]*data[i] + 1
})
ParForDo(desc)
@@ -49,9 +41,8 @@ func TestParFor2(t *testing.T) {
data[i] = i
}
desc := NewParFor(P)
- ParForSetup(desc, P, N, (*byte)(unsafe.Pointer(&data)), false, func(desc *ParFor, i uint32) {
- d := *(*[]uint64)(unsafe.Pointer(desc.Ctx))
- d[i] = d[i]*d[i] + 1
+ ParForSetup(desc, P, N, false, func(desc *ParFor, i uint32) {
+ data[i] = data[i]*data[i] + 1
})
for p := 0; p < P; p++ {
ParForDo(desc)
@@ -70,7 +61,7 @@ func TestParForSetup(t *testing.T) {
desc := NewParFor(P)
for n := uint32(0); n < N; n++ {
for p := uint32(1); p <= P; p++ {
- ParForSetup(desc, p, n, nil, true, func(desc *ParFor, i uint32) {})
+ ParForSetup(desc, p, n, true, func(desc *ParFor, i uint32) {})
sum := uint32(0)
size0 := uint32(0)
end0 := uint32(0)
@@ -113,9 +104,7 @@ func TestParForParallel(t *testing.T) {
P := GOMAXPROCS(-1)
c := make(chan bool, P)
desc := NewParFor(uint32(P))
- gdata = data
- ParForSetup(desc, uint32(P), uint32(N), nil, false, func(desc *ParFor, i uint32) {
- data := gdata
+ ParForSetup(desc, uint32(P), uint32(N), false, func(desc *ParFor, i uint32) {
data[i] = data[i]*data[i] + 1
})
for p := 1; p < P; p++ {
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index 38593af..dcf67cd 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -21,7 +21,7 @@ import (
)
// BUG(rsc): Profiles are incomplete and inaccurate on NetBSD and OS X.
-// See http://golang.org/issue/6047 for details.
+// See https://golang.org/issue/6047 for details.
// A Profile is a collection of stack traces showing the call sequences
// that led to instances of a particular event, such as allocation.
@@ -41,6 +41,13 @@ import (
// These predefined profiles maintain themselves and panic on an explicit
// Add or Remove method call.
//
+// The heap profile reports statistics as of the most recently completed
+// garbage collection; it elides more recent allocation to avoid skewing
+// the profile away from live data and toward garbage.
+// If there has been no garbage collection at all, the heap profile reports
+// all known allocations. This exception helps mainly in programs running
+// without garbage collection enabled, usually for debugging purposes.
+//
// The CPU profile is not available as a Profile. It has a special API,
// the StartCPUProfile and StopCPUProfile functions, because it streams
// output to a writer during profiling.
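
For reference, a hedged usage sketch of the two profiling paths the documentation above distinguishes: the heap profile read through Lookup (reported as of the last completed collection, hence the explicit GC), and the CPU profile driven by its own start/stop API. Ordinary user code, not part of this package:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// CPU profile: streamed to a writer between Start and Stop.
	f, err := os.Create("cpu.out")
	if err != nil {
		panic(err)
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		panic(err)
	}
	workload()
	pprof.StopCPUProfile()
	f.Close()

	// Heap profile: numbers are as of the last completed GC, so force one
	// if up-to-date allocation data is wanted before writing it out.
	runtime.GC()
	pprof.Lookup("heap").WriteTo(os.Stdout, 1)
}

func workload() {
	sum := 0
	for i := 0; i < 10000000; i++ {
		sum += i
	}
	_ = sum
}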
@@ -454,35 +461,33 @@ func writeHeap(w io.Writer, debug int) error {
// Print memstats information too.
// Pprof will ignore, but useful for people
- if debug > 0 {
- s := new(runtime.MemStats)
- runtime.ReadMemStats(s)
- fmt.Fprintf(w, "\n# runtime.MemStats\n")
- fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
- fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
- fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
- fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
- fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
- fmt.Fprintf(w, "# Frees = %d\n", s.Frees)
-
- fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
- fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
- fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
- fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
- fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
- fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)
-
- fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
- fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
- fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
- fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
-
- fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
- fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
- fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
- fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
- fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
- }
+ s := new(runtime.MemStats)
+ runtime.ReadMemStats(s)
+ fmt.Fprintf(w, "\n# runtime.MemStats\n")
+ fmt.Fprintf(w, "# Alloc = %d\n", s.Alloc)
+ fmt.Fprintf(w, "# TotalAlloc = %d\n", s.TotalAlloc)
+ fmt.Fprintf(w, "# Sys = %d\n", s.Sys)
+ fmt.Fprintf(w, "# Lookups = %d\n", s.Lookups)
+ fmt.Fprintf(w, "# Mallocs = %d\n", s.Mallocs)
+ fmt.Fprintf(w, "# Frees = %d\n", s.Frees)
+
+ fmt.Fprintf(w, "# HeapAlloc = %d\n", s.HeapAlloc)
+ fmt.Fprintf(w, "# HeapSys = %d\n", s.HeapSys)
+ fmt.Fprintf(w, "# HeapIdle = %d\n", s.HeapIdle)
+ fmt.Fprintf(w, "# HeapInuse = %d\n", s.HeapInuse)
+ fmt.Fprintf(w, "# HeapReleased = %d\n", s.HeapReleased)
+ fmt.Fprintf(w, "# HeapObjects = %d\n", s.HeapObjects)
+
+ fmt.Fprintf(w, "# Stack = %d / %d\n", s.StackInuse, s.StackSys)
+ fmt.Fprintf(w, "# MSpan = %d / %d\n", s.MSpanInuse, s.MSpanSys)
+ fmt.Fprintf(w, "# MCache = %d / %d\n", s.MCacheInuse, s.MCacheSys)
+ fmt.Fprintf(w, "# BuckHashSys = %d\n", s.BuckHashSys)
+
+ fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
+ fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
+ fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
+ fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
+ fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
if tw != nil {
tw.Flush()
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index 1069963..c32b847 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -9,7 +9,9 @@ package pprof_test
import (
"bytes"
"fmt"
+ "internal/testenv"
"math/big"
+ "os"
"os/exec"
"regexp"
"runtime"
@@ -121,15 +123,19 @@ func parseProfile(t *testing.T, bytes []byte, f func(uintptr, []uintptr)) {
func testCPUProfile(t *testing.T, need []string, f func()) {
switch runtime.GOOS {
case "darwin":
- out, err := exec.Command("uname", "-a").CombinedOutput()
- if err != nil {
- t.Fatal(err)
+ switch runtime.GOARCH {
+ case "arm", "arm64":
+ // nothing
+ default:
+ out, err := exec.Command("uname", "-a").CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ vers := string(out)
+ t.Logf("uname -a: %v", vers)
}
- vers := string(out)
- t.Logf("uname -a: %v", vers)
case "plan9":
- // unimplemented
- return
+ t.Skip("skipping on plan9")
}
var prof bytes.Buffer
@@ -141,7 +147,9 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
// Check that profile is well formed and contains need.
have := make([]uintptr, len(need))
+ var samples uintptr
parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) {
+ samples += count
for _, pc := range stk {
f := runtime.FuncForPC(pc)
if f == nil {
@@ -155,6 +163,14 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
}
}
})
+ t.Logf("total %d CPU profile samples collected", samples)
+
+ if samples < 10 && runtime.GOOS == "windows" {
+		// On some Windows machines we end up with
+ // not enough samples due to coarse timer
+ // resolution. Let it go.
+ t.Skip("too few samples on Windows (golang.org/issue/10842)")
+ }
if len(need) == 0 {
return
@@ -187,14 +203,28 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
t.Skipf("ignoring failure on %s; see golang.org/issue/6047", runtime.GOOS)
return
}
+ // Ignore the failure if the tests are running in a QEMU-based emulator,
+ // QEMU is not perfect at emulating everything.
+		// IN_QEMU environment variable is set by some of the Go builders.
+ // IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
+ if os.Getenv("IN_QEMU") == "1" {
+ t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
+ return
+ }
t.FailNow()
}
}
+// Fork can hang if preempted with signals frequently enough (see issue 5517).
+// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
- // Fork can hang if preempted with signals frequently enough (see issue 5517).
- // Ensure that we do not do this.
+ testenv.MustHaveExec(t)
+
heap := 1 << 30
+ if runtime.GOOS == "android" {
+ // Use smaller size for Android to avoid crash.
+ heap = 100 << 20
+ }
if testing.Short() {
heap = 100 << 20
}
@@ -217,7 +247,7 @@ func TestCPUProfileWithFork(t *testing.T) {
defer StopCPUProfile()
for i := 0; i < 10; i++ {
- exec.Command("go").CombinedOutput()
+ exec.Command(os.Args[0], "-h").CombinedOutput()
}
}
@@ -250,7 +280,7 @@ func TestGoroutineSwitch(t *testing.T) {
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
f := runtime.FuncForPC(stk[1])
- if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode" || f.Name() == "GC") {
+ if f != nil && (f.Name() == "runtime._System" || f.Name() == "runtime._ExternalCode" || f.Name() == "runtime._GC") {
return
}
}
@@ -368,7 +398,7 @@ func TestBlockProfile(t *testing.T) {
}
for _, test := range tests {
- if !regexp.MustCompile(test.re).MatchString(prof) {
+ if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
t.Fatalf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
}
}
diff --git a/libgo/go/runtime/print1.go b/libgo/go/runtime/print1.go
deleted file mode 100644
index 8f82688..0000000
--- a/libgo/go/runtime/print1.go
+++ /dev/null
@@ -1,323 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-// The compiler knows that a print of a value of this type
-// should use printhex instead of printuint (decimal).
-type hex uint64
-
-func bytes(s string) (ret []byte) {
- rp := (*slice)(unsafe.Pointer(&ret))
- sp := (*_string)(noescape(unsafe.Pointer(&s)))
- rp.array = sp.str
- rp.len = uint(sp.len)
- rp.cap = uint(sp.len)
- return
-}
-
-// printf is only called from C code. It has no type information for the args,
-// but C stacks are ignored by the garbage collector anyway, so having
-// type information would not add anything.
-//go:nosplit
-func printf(s *byte) {
- vprintf(gostringnocopy(s), add(unsafe.Pointer(&s), unsafe.Sizeof(s)))
-}
-
-// sprintf is only called from C code. It has no type information for the args,
-// but C stacks are ignored by the garbage collector anyway, so having
-// type information would not add anything.
-//go:nosplit
-func snprintf(dst *byte, n int32, s *byte) {
- buf := (*[1 << 30]byte)(unsafe.Pointer(dst))[0:n:n]
-
- gp := getg()
- gp.writebuf = buf[0:0 : n-1] // leave room for NUL, this is called from C
- vprintf(gostringnocopy(s), add(unsafe.Pointer(&s), unsafe.Sizeof(s)))
- buf[len(gp.writebuf)] = '\x00'
- gp.writebuf = nil
-}
-
-//var debuglock mutex
-
-// write to goroutine-local buffer if diverting output,
-// or else standard error.
-func gwrite(b []byte) {
- if len(b) == 0 {
- return
- }
- gp := getg()
- if gp == nil || gp.writebuf == nil {
- write(2, unsafe.Pointer(&b[0]), int32(len(b)))
- return
- }
-
- n := copy(gp.writebuf[len(gp.writebuf):cap(gp.writebuf)], b)
- gp.writebuf = gp.writebuf[:len(gp.writebuf)+n]
-}
-
-func prints(s *byte) {
- b := (*[1 << 30]byte)(unsafe.Pointer(s))
- for i := 0; ; i++ {
- if b[i] == 0 {
- gwrite(b[:i])
- return
- }
- }
-}
-
-func printsp() {
- print(" ")
-}
-
-func printnl() {
- print("\n")
-}
-
-// Very simple printf. Only for debugging prints.
-// Do not add to this without checking with Rob.
-func vprintf(str string, arg unsafe.Pointer) {
- //lock(&debuglock);
-
- s := bytes(str)
- start := 0
- i := 0
- for ; i < len(s); i++ {
- if s[i] != '%' {
- continue
- }
- if i > start {
- gwrite(s[start:i])
- }
- if i++; i >= len(s) {
- break
- }
- var siz uintptr
- switch s[i] {
- case 't', 'c':
- siz = 1
- case 'd', 'x': // 32-bit
- arg = roundup(arg, 4)
- siz = 4
- case 'D', 'U', 'X', 'f': // 64-bit
- arg = roundup(arg, unsafe.Sizeof(uintreg(0)))
- siz = 8
- case 'C':
- arg = roundup(arg, unsafe.Sizeof(uintreg(0)))
- siz = 16
- case 'p', 's': // pointer-sized
- arg = roundup(arg, unsafe.Sizeof(uintptr(0)))
- siz = unsafe.Sizeof(uintptr(0))
- case 'S': // pointer-aligned but bigger
- arg = roundup(arg, unsafe.Sizeof(uintptr(0)))
- siz = unsafe.Sizeof(string(""))
- case 'a': // pointer-aligned but bigger
- arg = roundup(arg, unsafe.Sizeof(uintptr(0)))
- siz = unsafe.Sizeof([]byte{})
- case 'i', 'e': // pointer-aligned but bigger
- arg = roundup(arg, unsafe.Sizeof(uintptr(0)))
- siz = unsafe.Sizeof(interface{}(nil))
- }
- switch s[i] {
- case 'a':
- printslice(*(*[]byte)(arg))
- case 'c':
- printbyte(*(*byte)(arg))
- case 'd':
- printint(int64(*(*int32)(arg)))
- case 'D':
- printint(int64(*(*int64)(arg)))
- case 'e':
- printeface(*(*interface{})(arg))
- case 'f':
- printfloat(*(*float64)(arg))
- case 'C':
- printcomplex(*(*complex128)(arg))
- case 'i':
- printiface(*(*fInterface)(arg))
- case 'p':
- printpointer(*(*unsafe.Pointer)(arg))
- case 's':
- prints(*(**byte)(arg))
- case 'S':
- printstring(*(*string)(arg))
- case 't':
- printbool(*(*bool)(arg))
- case 'U':
- printuint(*(*uint64)(arg))
- case 'x':
- printhex(uint64(*(*uint32)(arg)))
- case 'X':
- printhex(*(*uint64)(arg))
- }
- arg = add(arg, siz)
- start = i + 1
- }
- if start < i {
- gwrite(s[start:i])
- }
-
- //unlock(&debuglock);
-}
-
-func printpc(p unsafe.Pointer) {
- print("PC=", hex(uintptr(p)))
-}
-
-func printbool(v bool) {
- if v {
- print("true")
- } else {
- print("false")
- }
-}
-
-func printbyte(c byte) {
- gwrite((*[1]byte)(unsafe.Pointer(&c))[:])
-}
-
-func printfloat(v float64) {
- switch {
- case v != v:
- print("NaN")
- return
- case v+v == v && v > 0:
- print("+Inf")
- return
- case v+v == v && v < 0:
- print("-Inf")
- return
- }
-
- const n = 7 // digits printed
- var buf [n + 7]byte
- buf[0] = '+'
- e := 0 // exp
- if v == 0 {
- if 1/v < 0 {
- buf[0] = '-'
- }
- } else {
- if v < 0 {
- v = -v
- buf[0] = '-'
- }
-
- // normalize
- for v >= 10 {
- e++
- v /= 10
- }
- for v < 1 {
- e--
- v *= 10
- }
-
- // round
- h := 5.0
- for i := 0; i < n; i++ {
- h /= 10
- }
- v += h
- if v >= 10 {
- e++
- v /= 10
- }
- }
-
- // format +d.dddd+edd
- for i := 0; i < n; i++ {
- s := int(v)
- buf[i+2] = byte(s + '0')
- v -= float64(s)
- v *= 10
- }
- buf[1] = buf[2]
- buf[2] = '.'
-
- buf[n+2] = 'e'
- buf[n+3] = '+'
- if e < 0 {
- e = -e
- buf[n+3] = '-'
- }
-
- buf[n+4] = byte(e/100) + '0'
- buf[n+5] = byte(e/10)%10 + '0'
- buf[n+6] = byte(e%10) + '0'
- gwrite(buf[:])
-}
-
-func printcomplex(c complex128) {
- print("(", real(c), imag(c), "i)")
-}
-
-func printuint(v uint64) {
- var buf [100]byte
- i := len(buf)
- for i--; i > 0; i-- {
- buf[i] = byte(v%10 + '0')
- if v < 10 {
- break
- }
- v /= 10
- }
- gwrite(buf[i:])
-}
-
-func printint(v int64) {
- if v < 0 {
- print("-")
- v = -v
- }
- printuint(uint64(v))
-}
-
-func printhex(v uint64) {
- const dig = "0123456789abcdef"
- var buf [100]byte
- i := len(buf)
- for i--; i > 0; i-- {
- buf[i] = dig[v%16]
- if v < 16 {
- break
- }
- v /= 16
- }
- i--
- buf[i] = 'x'
- i--
- buf[i] = '0'
- gwrite(buf[i:])
-}
-
-func printpointer(p unsafe.Pointer) {
- printhex(uint64(uintptr(p)))
-}
-
-func printstring(s string) {
- if uintptr(len(s)) > maxstring {
- gwrite(bytes("[string too long]"))
- return
- }
- gwrite(bytes(s))
-}
-
-func printslice(s []byte) {
- sp := (*slice)(unsafe.Pointer(&s))
- print("[", len(s), "/", cap(s), "]")
- printpointer(unsafe.Pointer(sp.array))
-}
-
-func printeface(e interface{}) {
- ep := (*eface)(unsafe.Pointer(&e))
- print("(", ep._type, ",", ep.data, ")")
-}
-
-func printiface(i fInterface) {
- ip := (*iface)(unsafe.Pointer(&i))
- print("(", ip.tab, ",", ip.data, ")")
-}
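
printfloat above hand-rolls a fixed scientific format (sign, seven significant digits, explicit exponent) because the runtime cannot call into strconv. In user code the closest equivalent is strconv.FormatFloat with the 'e' verb; a small sketch, noting that the exponent width and the leading '+' differ slightly from the runtime's output:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Roughly the shape printfloat above emits: seven significant digits in
	// scientific notation. (printfloat always prints a sign and a three-digit
	// exponent; FormatFloat does not.)
	for _, v := range []float64{0, -0.00123, 12345.678, 1.0 / 3} {
		fmt.Println(strconv.FormatFloat(v, 'e', 6, 64))
	}
}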
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
deleted file mode 100644
index 517ca03..0000000
--- a/libgo/go/runtime/proc.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-func newsysmon()
-
-func runtime_init()
-func main_init()
-func main_main()
-
-// The main goroutine.
-func main() {
- g := getg()
-
- // Racectx of m0->g0 is used only as the parent of the main goroutine.
- // It must not be used for anything else.
- g.m.g0.racectx = 0
-
- // Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
- // Using decimal instead of binary GB and MB because
- // they look nicer in the stack overflow failure message.
- if ptrSize == 8 {
- maxstacksize = 1000000000
- } else {
- maxstacksize = 250000000
- }
-
- onM(newsysmon)
-
- // Lock the main goroutine onto this, the main OS thread,
- // during initialization. Most programs won't care, but a few
- // do require certain calls to be made by the main thread.
- // Those can arrange for main.main to run in the main thread
- // by calling runtime.LockOSThread during initialization
- // to preserve the lock.
- lockOSThread()
-
- if g.m != &m0 {
- gothrow("runtime.main not on m0")
- }
-
- runtime_init() // must be before defer
-
- // Defer unlock so that runtime.Goexit during init does the unlock too.
- needUnlock := true
- defer func() {
- if needUnlock {
- unlockOSThread()
- }
- }()
-
- memstats.enablegc = true // now that runtime is initialized, GC is okay
-
- main_init()
-
- needUnlock = false
- unlockOSThread()
-
- main_main()
- if raceenabled {
- racefini()
- }
-
- // Make racy client program work: if panicking on
- // another goroutine at the same time as main returns,
- // let the other goroutine finish printing the panic trace.
- // Once it does, it will exit. See issue 3934.
- if panicking != 0 {
- gopark(nil, nil, "panicwait")
- }
-
- exit(0)
- for {
- var x *int32
- *x = 0
- }
-}
-
-var parkunlock_c byte
-
-// start forcegc helper goroutine
-func init() {
- go forcegchelper()
-}
-
-func forcegchelper() {
- forcegc.g = getg()
- forcegc.g.issystem = true
- for {
- lock(&forcegc.lock)
- if forcegc.idle != 0 {
- gothrow("forcegc: phase error")
- }
- atomicstore(&forcegc.idle, 1)
- goparkunlock(&forcegc.lock, "force gc (idle)")
- // this goroutine is explicitly resumed by sysmon
- if debug.gctrace > 0 {
- println("GC forced")
- }
- gogc(1)
- }
-}
-
-//go:nosplit
-
-// Gosched yields the processor, allowing other goroutines to run. It does not
-// suspend the current goroutine, so execution resumes automatically.
-func Gosched() {
- mcall(gosched_m)
-}
-
-// Puts the current goroutine into a waiting state and calls unlockf.
-// If unlockf returns false, the goroutine is resumed.
-func gopark(unlockf unsafe.Pointer, lock unsafe.Pointer, reason string) {
- mp := acquirem()
- gp := mp.curg
- status := readgstatus(gp)
- if status != _Grunning && status != _Gscanrunning {
- gothrow("gopark: bad g status")
- }
- mp.waitlock = lock
- mp.waitunlockf = unlockf
- gp.waitreason = reason
- releasem(mp)
- // can't do anything that might move the G between Ms here.
- mcall(park_m)
-}
-
-// Puts the current goroutine into a waiting state and unlocks the lock.
-// The goroutine can be made runnable again by calling goready(gp).
-func goparkunlock(lock *mutex, reason string) {
- gopark(unsafe.Pointer(&parkunlock_c), unsafe.Pointer(lock), reason)
-}
-
-func goready(gp *g) {
- mp := acquirem()
- mp.ptrarg[0] = unsafe.Pointer(gp)
- onM(ready_m)
- releasem(mp)
-}
-
-//go:nosplit
-func acquireSudog() *sudog {
- c := gomcache()
- s := c.sudogcache
- if s != nil {
- if s.elem != nil {
- gothrow("acquireSudog: found s.elem != nil in cache")
- }
- c.sudogcache = s.next
- s.next = nil
- return s
- }
-
- // Delicate dance: the semaphore implementation calls
- // acquireSudog, acquireSudog calls new(sudog),
- // new calls malloc, malloc can call the garbage collector,
- // and the garbage collector calls the semaphore implementation
- // in stoptheworld.
- // Break the cycle by doing acquirem/releasem around new(sudog).
- // The acquirem/releasem increments m.locks during new(sudog),
- // which keeps the garbage collector from being invoked.
- mp := acquirem()
- p := new(sudog)
- releasem(mp)
- return p
-}
-
-//go:nosplit
-func releaseSudog(s *sudog) {
- if s.elem != nil {
- gothrow("runtime: sudog with non-nil elem")
- }
- if s.selectdone != nil {
- gothrow("runtime: sudog with non-nil selectdone")
- }
- if s.next != nil {
- gothrow("runtime: sudog with non-nil next")
- }
- if s.prev != nil {
- gothrow("runtime: sudog with non-nil prev")
- }
- if s.waitlink != nil {
- gothrow("runtime: sudog with non-nil waitlink")
- }
- gp := getg()
- if gp.param != nil {
- gothrow("runtime: releaseSudog with non-nil gp.param")
- }
- c := gomcache()
- s.next = c.sudogcache
- c.sudogcache = s
-}
-
-// funcPC returns the entry PC of the function f.
-// It assumes that f is a func value. Otherwise the behavior is undefined.
-//go:nosplit
-func funcPC(f interface{}) uintptr {
- return **(**uintptr)(add(unsafe.Pointer(&f), ptrSize))
-}
-
-// called from assembly
-func badmcall(fn func(*g)) {
- gothrow("runtime: mcall called on m->g0 stack")
-}
-
-func badmcall2(fn func(*g)) {
- gothrow("runtime: mcall function returned")
-}
-
-func badreflectcall() {
- panic("runtime: arg size to reflect.call more than 1GB")
-}
-
-func lockedOSThread() bool {
- gp := getg()
- return gp.lockedm != nil && gp.m.lockedg != nil
-}
-
-func newP() *p {
- return new(p)
-}
-
-func newM() *m {
- return new(m)
-}
-
-func newG() *g {
- return new(g)
-}
-
-func allgadd(gp *g) {
- if readgstatus(gp) == _Gidle {
- gothrow("allgadd: bad status Gidle")
- }
-
- lock(&allglock)
- allgs = append(allgs, gp)
- allg = &allgs[0]
- allglen = uintptr(len(allgs))
- unlock(&allglock)
-}
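
The deleted runtime.main above locks the main goroutine to the main OS thread during initialization precisely so that a program's own init code can call runtime.LockOSThread and keep main.main on that thread. A minimal sketch of that user-side arrangement (standard library only, illustrative):

package main

import (
	"fmt"
	"runtime"
)

func init() {
	// Taken during initialization, before main.main runs, so the lock is still
	// held when main starts; this is the arrangement the deleted runtime.main
	// above preserves for programs that need the main OS thread.
	runtime.LockOSThread()
}

func main() {
	defer runtime.UnlockOSThread()
	fmt.Println("main.main running on the locked main OS thread")
	runtime.Gosched() // yield the processor; execution resumes here automatically
}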
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index 4f364dc..4350e8f 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -7,6 +7,8 @@ package runtime_test
import (
"math"
"runtime"
+ "runtime/debug"
+ "sync"
"sync/atomic"
"syscall"
"testing"
@@ -94,6 +96,10 @@ func TestYieldLocked(t *testing.T) {
}
func TestGoroutineParallelism(t *testing.T) {
+ if runtime.NumCPU() == 1 {
+ // Takes too long, too easy to deadlock, etc.
+ t.Skip("skipping on uniprocessor")
+ }
P := 4
N := 10
if testing.Short() {
@@ -101,6 +107,10 @@ func TestGoroutineParallelism(t *testing.T) {
N = 3
}
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
+	// If the runtime triggers a forced GC during this test, it will deadlock,
+ // since the goroutines can't be stopped/preempted.
+ // Disable GC for this test (see issue #10958).
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
for try := 0; try < N; try++ {
done := make(chan bool)
x := uint32(0)
@@ -289,6 +299,98 @@ func main() {
}
`
+func TestPingPongHog(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in -short mode")
+ }
+
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+ done := make(chan bool)
+ hogChan, lightChan := make(chan bool), make(chan bool)
+ hogCount, lightCount := 0, 0
+
+ run := func(limit int, counter *int, wake chan bool) {
+ for {
+ select {
+ case <-done:
+ return
+
+ case <-wake:
+ for i := 0; i < limit; i++ {
+ *counter++
+ }
+ wake <- true
+ }
+ }
+ }
+
+ // Start two co-scheduled hog goroutines.
+ for i := 0; i < 2; i++ {
+ go run(1e6, &hogCount, hogChan)
+ }
+
+ // Start two co-scheduled light goroutines.
+ for i := 0; i < 2; i++ {
+ go run(1e3, &lightCount, lightChan)
+ }
+
+ // Start goroutine pairs and wait for a few preemption rounds.
+ hogChan <- true
+ lightChan <- true
+ time.Sleep(100 * time.Millisecond)
+ close(done)
+ <-hogChan
+ <-lightChan
+
+ // Check that hogCount and lightCount are within a factor of
+ // 2, which indicates that both pairs of goroutines handed off
+ // the P within a time-slice to their buddy.
+ if hogCount > lightCount*2 || lightCount > hogCount*2 {
+ t.Fatalf("want hogCount/lightCount in [0.5, 2]; got %d/%d = %g", hogCount, lightCount, float64(hogCount)/float64(lightCount))
+ }
+}
+
+func BenchmarkPingPongHog(b *testing.B) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
+
+ // Create a CPU hog
+ stop, done := make(chan bool), make(chan bool)
+ go func() {
+ for {
+ select {
+ case <-stop:
+ done <- true
+ return
+ default:
+ }
+ }
+ }()
+
+ // Ping-pong b.N times
+ ping, pong := make(chan bool), make(chan bool)
+ go func() {
+ for j := 0; j < b.N; j++ {
+ pong <- <-ping
+ }
+ close(stop)
+ done <- true
+ }()
+ go func() {
+ for i := 0; i < b.N; i++ {
+ ping <- <-pong
+ }
+ done <- true
+ }()
+ b.ResetTimer()
+ ping <- true // Start ping-pong
+ <-stop
+ b.StopTimer()
+ <-ping // Let last ponger exit
+ <-done // Make sure goroutines exit
+ <-done
+ <-done
+}
+
func stackGrowthRecursive(i int) {
var pad [128]uint64
if i != 0 && pad[0] == 0 {
@@ -364,13 +466,17 @@ func nonleaf(stop chan int) bool {
}
}
+/*
func TestSchedLocalQueue(t *testing.T) {
runtime.TestSchedLocalQueue1()
}
+*/
+/*
func TestSchedLocalQueueSteal(t *testing.T) {
runtime.TestSchedLocalQueueSteal1()
}
+*/
func benchmarkStackGrowth(b *testing.B, rec int) {
b.RunParallel(func(pb *testing.PB) {
@@ -414,6 +520,37 @@ func benchmarkCreateGoroutines(b *testing.B, procs int) {
}
}
+func BenchmarkCreateGoroutinesCapture(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ const N = 4
+ var wg sync.WaitGroup
+ wg.Add(N)
+ for i := 0; i < N; i++ {
+ i := i
+ go func() {
+ if i >= N {
+ b.Logf("bad") // just to capture b
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ }
+}
+
+func BenchmarkClosureCall(b *testing.B) {
+ sum := 0
+ off1 := 1
+ for i := 0; i < b.N; i++ {
+ off2 := 2
+ func() {
+ sum += i + off1 + off2
+ }()
+ }
+ _ = sum
+}
+
type Matrix [][]float64
func BenchmarkMatmult(b *testing.B) {
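BenchmarkCreateGoroutinesCapture redeclares the loop variable with "i := i" before launching each goroutine so the closure captures a per-iteration copy rather than the shared loop variable. A minimal illustration of why that shadow copy matters (not part of the benchmark itself):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const N = 4
	var wg sync.WaitGroup
	wg.Add(N)
	for i := 0; i < N; i++ {
		i := i // give each goroutine its own copy of the loop variable
		go func() {
			defer wg.Done()
			fmt.Println("goroutine saw i =", i)
		}()
	}
	wg.Wait()
}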
diff --git a/libgo/go/runtime/race0.go b/libgo/go/runtime/race0.go
deleted file mode 100644
index 5d90cc8..0000000
--- a/libgo/go/runtime/race0.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !race
-
-// Dummy race detection API, used when not built with -race.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-const raceenabled = false
-
-// Because raceenabled is false, none of these functions should be called.
-
-func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func raceinit() { gothrow("race") }
-func racefini() { gothrow("race") }
-func racemapshadow(addr unsafe.Pointer, size uintptr) { gothrow("race") }
-func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") }
-func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") }
-func raceacquire(addr unsafe.Pointer) { gothrow("race") }
-func raceacquireg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racerelease(addr unsafe.Pointer) { gothrow("race") }
-func racereleaseg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racereleasemerge(addr unsafe.Pointer) { gothrow("race") }
-func racereleasemergeg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racefingo() { gothrow("race") }
-func racemalloc(p unsafe.Pointer, sz uintptr) { gothrow("race") }
-func racegostart(pc uintptr) uintptr { gothrow("race"); return 0 }
-func racegoend() { gothrow("race") }
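race0.go is the no-op half of a build-tag split: when the runtime is built without -race, these stubs satisfy the race API and gothrow if ever reached. Ordinary packages can use the same pattern; the file below is only a sketch with a hypothetical "apptrace" tag and "tracing" package name, shown to illustrate the technique rather than any real API.

// +build !apptrace

// trace_off.go (hypothetical): the no-op implementation compiled when
// the "apptrace" build tag is absent, mirroring how race0.go supplies
// dummy race hooks when the race detector is off.
package tracing

const traceEnabled = false

// Because traceEnabled is false, guarded callers never reach this;
// panicking makes an accidental call obvious, like gothrow("race") above.
func traceEvent(name string) {
	panic("tracing: traceEvent called with tracing disabled")
}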
diff --git a/libgo/go/runtime/rdebug.go b/libgo/go/runtime/rdebug.go
deleted file mode 100644
index e5e6911..0000000
--- a/libgo/go/runtime/rdebug.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-func setMaxStack(in int) (out int) {
- out = int(maxstacksize)
- maxstacksize = uintptr(in)
- return out
-}
-
-func setGCPercent(in int32) (out int32) {
- mp := acquirem()
- mp.scalararg[0] = uintptr(int(in))
- onM(setgcpercent_m)
- out = int32(int(mp.scalararg[0]))
- releasem(mp)
- return out
-}
-
-func setPanicOnFault(new bool) (old bool) {
- mp := acquirem()
- old = mp.curg.paniconfault
- mp.curg.paniconfault = new
- releasem(mp)
- return old
-}
-
-func setMaxThreads(in int) (out int) {
- mp := acquirem()
- mp.scalararg[0] = uintptr(in)
- onM(setmaxthreads_m)
- out = int(mp.scalararg[0])
- releasem(mp)
- return out
-}
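These removed hooks sit underneath the exported setters in runtime/debug, each of which returns the previous value so callers can restore it. Typical use of that public API looks like this (the specific values are arbitrary examples):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	oldGC := debug.SetGCPercent(200) // collect when the heap grows 200% over live data
	defer debug.SetGCPercent(oldGC)

	oldStack := debug.SetMaxStack(64 << 20) // cap goroutine stacks at 64 MB
	defer debug.SetMaxStack(oldStack)

	oldFault := debug.SetPanicOnFault(true) // turn unexpected faults into panics
	defer debug.SetPanicOnFault(oldFault)

	fmt.Println("previous GC percent:", oldGC)
}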
diff --git a/libgo/go/runtime/rune.go b/libgo/go/runtime/rune.go
deleted file mode 100644
index a9f6835..0000000
--- a/libgo/go/runtime/rune.go
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * The authors of this software are Rob Pike and Ken Thompson.
- * Copyright (c) 2002 by Lucent Technologies.
- * Portions Copyright 2009 The Go Authors. All rights reserved.
- * Permission to use, copy, modify, and distribute this software for any
- * purpose without fee is hereby granted, provided that this entire notice
- * is included in all copies of any software which is or includes a copy
- * or modification of this software and in all copies of the supporting
- * documentation for such software.
- * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
- * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY
- * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
- * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
- */
-
-/*
- * This code is copied, with slight editing due to type differences,
- * from a subset of ../lib9/utf/rune.c
- */
-
-package runtime
-
-const (
- bit1 = 7
- bitx = 6
- bit2 = 5
- bit3 = 4
- bit4 = 3
- bit5 = 2
-
- t1 = ((1 << (bit1 + 1)) - 1) ^ 0xFF /* 0000 0000 */
- tx = ((1 << (bitx + 1)) - 1) ^ 0xFF /* 1000 0000 */
- t2 = ((1 << (bit2 + 1)) - 1) ^ 0xFF /* 1100 0000 */
- t3 = ((1 << (bit3 + 1)) - 1) ^ 0xFF /* 1110 0000 */
- t4 = ((1 << (bit4 + 1)) - 1) ^ 0xFF /* 1111 0000 */
- t5 = ((1 << (bit5 + 1)) - 1) ^ 0xFF /* 1111 1000 */
-
- rune1 = (1 << (bit1 + 0*bitx)) - 1 /* 0000 0000 0111 1111 */
- rune2 = (1 << (bit2 + 1*bitx)) - 1 /* 0000 0111 1111 1111 */
- rune3 = (1 << (bit3 + 2*bitx)) - 1 /* 1111 1111 1111 1111 */
- rune4 = (1 << (bit4 + 3*bitx)) - 1 /* 0001 1111 1111 1111 1111 1111 */
-
- maskx = (1 << bitx) - 1 /* 0011 1111 */
- testx = maskx ^ 0xFF /* 1100 0000 */
-
- runeerror = 0xFFFD
- runeself = 0x80
-
- surrogateMin = 0xD800
- surrogateMax = 0xDFFF
-
- bad = runeerror
-
- runemax = 0x10FFFF /* maximum rune value */
-)
-
-/*
- * Modified by Wei-Hwa Huang, Google Inc., on 2004-09-24
- * This is a slower but "safe" version of the old chartorune
- * that works on strings that are not necessarily null-terminated.
- *
- * If you know for sure that your string is null-terminated,
- * chartorune will be a bit faster.
- *
- * It is guaranteed not to attempt to access "length"
- * past the incoming pointer. This is to avoid
- * possible access violations. If the string appears to be
- * well-formed but incomplete (i.e., to get the whole Rune
- * we'd need to read past str+length) then we'll set the Rune
- * to Bad and return 0.
- *
- * Note that if we have decoding problems for other
- * reasons, we return 1 instead of 0.
- */
-func charntorune(s string) (rune, int) {
- /* When we're not allowed to read anything */
- if len(s) <= 0 {
- return bad, 1
- }
-
- /*
- * one character sequence (7-bit value)
- * 00000-0007F => T1
- */
- c := s[0]
- if c < tx {
- return rune(c), 1
- }
-
- // If we can't read more than one character we must stop
- if len(s) <= 1 {
- return bad, 1
- }
-
- /*
- * two character sequence (11-bit value)
- * 0080-07FF => t2 tx
- */
- c1 := s[1] ^ tx
- if (c1 & testx) != 0 {
- return bad, 1
- }
- if c < t3 {
- if c < t2 {
- return bad, 1
- }
- l := ((rune(c) << bitx) | rune(c1)) & rune2
- if l <= rune1 {
- return bad, 1
- }
- return l, 2
- }
-
- // If we can't read more than two characters we must stop
- if len(s) <= 2 {
- return bad, 1
- }
-
- /*
- * three character sequence (16-bit value)
- * 0800-FFFF => t3 tx tx
- */
- c2 := s[2] ^ tx
- if (c2 & testx) != 0 {
- return bad, 1
- }
- if c < t4 {
- l := ((((rune(c) << bitx) | rune(c1)) << bitx) | rune(c2)) & rune3
- if l <= rune2 {
- return bad, 1
- }
- if surrogateMin <= l && l <= surrogateMax {
- return bad, 1
- }
- return l, 3
- }
-
- if len(s) <= 3 {
- return bad, 1
- }
-
- /*
- * four character sequence (21-bit value)
- * 10000-1FFFFF => t4 tx tx tx
- */
- c3 := s[3] ^ tx
- if (c3 & testx) != 0 {
- return bad, 1
- }
- if c < t5 {
- l := ((((((rune(c) << bitx) | rune(c1)) << bitx) | rune(c2)) << bitx) | rune(c3)) & rune4
- if l <= rune3 || l > runemax {
- return bad, 1
- }
- return l, 4
- }
-
- // Support for 5-byte or longer UTF-8 would go here, but
- // since we don't have that, we'll just return bad.
- return bad, 1
-}
-
-// runetochar converts r to bytes and writes the result to str.
-// returns the number of bytes generated.
-func runetochar(str []byte, r rune) int {
- /* runes are signed, so convert to unsigned for range check. */
- c := uint32(r)
- /*
- * one character sequence
- * 00000-0007F => 00-7F
- */
- if c <= rune1 {
- str[0] = byte(c)
- return 1
- }
- /*
- * two character sequence
- * 0080-07FF => t2 tx
- */
- if c <= rune2 {
- str[0] = byte(t2 | (c >> (1 * bitx)))
- str[1] = byte(tx | (c & maskx))
- return 2
- }
-
- /*
- * If the rune is out of range or a surrogate half, convert it to the error rune.
- * Do this test here because the error rune encodes to three bytes.
- * Doing it earlier would duplicate work, since an out of range
- * rune wouldn't have fit in one or two bytes.
- */
- if c > runemax {
- c = runeerror
- }
- if surrogateMin <= c && c <= surrogateMax {
- c = runeerror
- }
-
- /*
- * three character sequence
- * 0800-FFFF => t3 tx tx
- */
- if c <= rune3 {
- str[0] = byte(t3 | (c >> (2 * bitx)))
- str[1] = byte(tx | ((c >> (1 * bitx)) & maskx))
- str[2] = byte(tx | (c & maskx))
- return 3
- }
-
- /*
- * four character sequence (21-bit value)
- * 10000-1FFFFF => t4 tx tx tx
- */
- str[0] = byte(t4 | (c >> (3 * bitx)))
- str[1] = byte(tx | ((c >> (2 * bitx)) & maskx))
- str[2] = byte(tx | ((c >> (1 * bitx)) & maskx))
- str[3] = byte(tx | (c & maskx))
- return 4
-}
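charntorune and runetochar are the runtime's private UTF-8 decoder and encoder, with the same bounds checks, surrogate rejection, and U+FFFD fallback that the exported unicode/utf8 package provides. For comparison, the equivalent operations in user code look like this (illustrative only):

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "héllo"

	// Decode the first rune without reading past the string,
	// like charntorune's length-checked decoding.
	r, size := utf8.DecodeRuneInString(s)
	fmt.Printf("first rune %q uses %d byte(s)\n", r, size)

	// Encode a rune back into bytes, like runetochar. Out-of-range
	// runes and surrogate halves become utf8.RuneError (U+FFFD).
	buf := make([]byte, utf8.UTFMax)
	n := utf8.EncodeRune(buf, '€')
	fmt.Printf("encoded %q into % x\n", '€', buf[:n])
}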
diff --git a/libgo/go/runtime/runtime.go b/libgo/go/runtime/runtime.go
deleted file mode 100644
index 4e4e1d1..0000000
--- a/libgo/go/runtime/runtime.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-var ticks struct {
- lock mutex
- val uint64
-}
-
-var tls0 [8]uintptr // available storage for m0's TLS; not necessarily used; opaque to GC
-
-// Note: Called by runtime/pprof in addition to runtime code.
-func tickspersecond() int64 {
- r := int64(atomicload64(&ticks.val))
- if r != 0 {
- return r
- }
- lock(&ticks.lock)
- r = int64(ticks.val)
- if r == 0 {
- t0 := nanotime()
- c0 := cputicks()
- usleep(100 * 1000)
- t1 := nanotime()
- c1 := cputicks()
- if t1 == t0 {
- t1++
- }
- r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0)
- if r == 0 {
- r++
- }
- atomicstore64(&ticks.val, uint64(r))
- }
- unlock(&ticks.lock)
- return r
-}
-
-func makeStringSlice(n int) []string {
- return make([]string, n)
-}
-
-// TODO: Move to parfor.go when parfor.c becomes parfor.go.
-func parforalloc(nthrmax uint32) *parfor {
- return &parfor{
- thr: &make([]parforthread, nthrmax)[0],
- nthrmax: nthrmax,
- }
-}
-
-var envs []string
-var argslice []string
-
-// called from syscall
-func runtime_envs() []string { return envs }
-
-// called from os
-func runtime_args() []string { return argslice }
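tickspersecond follows a common lazy-initialization shape: an atomic load as the fast path, a lock plus a recheck so only one goroutine measures, and an atomic store to publish the result. The sketch below reproduces that shape with sync/atomic and a fake calibration step; the names and the 10ms sleep are invented, and this is not the runtime's clock code.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// calib mirrors tickspersecond's layout: a mutex plus a value where
// zero means "not yet measured".
var calib struct {
	mu  sync.Mutex
	val uint64
}

func nanosPerTick() uint64 {
	if v := atomic.LoadUint64(&calib.val); v != 0 {
		return v // fast path: already calibrated
	}
	calib.mu.Lock()
	defer calib.mu.Unlock()
	if calib.val == 0 { // recheck under the lock, as tickspersecond does
		start := time.Now()
		time.Sleep(10 * time.Millisecond) // stand-in for the 100ms usleep
		elapsed := uint64(time.Since(start).Nanoseconds())
		if elapsed == 0 {
			elapsed = 1 // never publish zero, which means "unset"
		}
		atomic.StoreUint64(&calib.val, elapsed)
	}
	return calib.val
}

func main() {
	fmt.Println("calibrated:", nanosPerTick(), "ns")
	fmt.Println("cached:    ", nanosPerTick(), "ns")
}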
diff --git a/libgo/go/runtime/runtime_test.go b/libgo/go/runtime/runtime_test.go
index 8059d1a..bb8ff71 100644
--- a/libgo/go/runtime/runtime_test.go
+++ b/libgo/go/runtime/runtime_test.go
@@ -6,13 +6,8 @@ package runtime_test
import (
"io"
- // "io/ioutil"
- // "os"
- // "os/exec"
. "runtime"
"runtime/debug"
- // "strconv"
- // "strings"
"testing"
"unsafe"
)
@@ -88,51 +83,6 @@ func BenchmarkDeferMany(b *testing.B) {
}
}
-/* The go tool is not present in gccgo.
-
-// The profiling signal handler needs to know whether it is executing runtime.gogo.
-// The constant RuntimeGogoBytes in arch_*.h gives the size of the function;
-// we don't have a way to obtain it from the linker (perhaps someday).
-// Test that the constant matches the size determined by 'go tool nm -S'.
-// The value reported will include the padding between runtime.gogo and the
-// next function in memory. That's fine.
-func TestRuntimeGogoBytes(t *testing.T) {
- switch GOOS {
- case "android", "nacl":
- t.Skipf("skipping on %s", GOOS)
- }
-
- dir, err := ioutil.TempDir("", "go-build")
- if err != nil {
- t.Fatalf("failed to create temp directory: %v", err)
- }
- defer os.RemoveAll(dir)
-
- out, err := exec.Command("go", "build", "-o", dir+"/hello", "../../test/helloworld.go").CombinedOutput()
- if err != nil {
- t.Fatalf("building hello world: %v\n%s", err, out)
- }
-
- out, err = exec.Command("go", "tool", "nm", "-size", dir+"/hello").CombinedOutput()
- if err != nil {
- t.Fatalf("go tool nm: %v\n%s", err, out)
- }
-
- for _, line := range strings.Split(string(out), "\n") {
- f := strings.Fields(line)
- if len(f) == 4 && f[3] == "runtime.gogo" {
- size, _ := strconv.Atoi(f[1])
- if GogoBytes() != int32(size) {
- t.Fatalf("RuntimeGogoBytes = %d, should be %d", GogoBytes(), size)
- }
- return
- }
- }
-
- t.Fatalf("go tool nm did not report size for runtime.gogo")
-}
-*/
-
// golang.org/issue/7063
func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
SetCPUProfileRate(0)
@@ -176,12 +126,6 @@ var faultAddrs = []uint64{
}
func TestSetPanicOnFault(t *testing.T) {
- // This currently results in a fault in the signal trampoline on
- // dragonfly/386 - see issue 7421.
- if GOOS == "dragonfly" && GOARCH == "386" {
- t.Skip("skipping test on dragonfly/386")
- }
-
old := debug.SetPanicOnFault(true)
defer debug.SetPanicOnFault(old)
@@ -250,3 +194,112 @@ func TestEqString(t *testing.T) {
}
}
}
+
+/*
+func TestTrailingZero(t *testing.T) {
+ // make sure we add padding for structs with trailing zero-sized fields
+ type T1 struct {
+ n int32
+ z [0]byte
+ }
+ if unsafe.Sizeof(T1{}) != 8 {
+ t.Errorf("sizeof(%#v)==%d, want 8", T1{}, unsafe.Sizeof(T1{}))
+ }
+ type T2 struct {
+ n int64
+ z struct{}
+ }
+ if unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(Uintreg(0)) {
+ t.Errorf("sizeof(%#v)==%d, want %d", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(Uintreg(0)))
+ }
+ type T3 struct {
+ n byte
+ z [4]struct{}
+ }
+ if unsafe.Sizeof(T3{}) != 2 {
+ t.Errorf("sizeof(%#v)==%d, want 2", T3{}, unsafe.Sizeof(T3{}))
+ }
+ // make sure padding can double for both zerosize and alignment
+ type T4 struct {
+ a int32
+ b int16
+ c int8
+ z struct{}
+ }
+ if unsafe.Sizeof(T4{}) != 8 {
+ t.Errorf("sizeof(%#v)==%d, want 8", T4{}, unsafe.Sizeof(T4{}))
+ }
+ // make sure we don't pad a zero-sized thing
+ type T5 struct {
+ }
+ if unsafe.Sizeof(T5{}) != 0 {
+ t.Errorf("sizeof(%#v)==%d, want 0", T5{}, unsafe.Sizeof(T5{}))
+ }
+}
+*/
+
+func TestBadOpen(t *testing.T) {
+ if GOOS == "windows" || GOOS == "nacl" {
+ t.Skip("skipping OS that doesn't have open/read/write/close")
+ }
+ // make sure we get the correct error code if open fails. Same for
+ // read/write/close on the resulting -1 fd. See issue 10052.
+ nonfile := []byte("/notreallyafile")
+ fd := Open(&nonfile[0], 0, 0)
+ if fd != -1 {
+ t.Errorf("open(\"%s\")=%d, want -1", string(nonfile), fd)
+ }
+ var buf [32]byte
+ r := Read(-1, unsafe.Pointer(&buf[0]), int32(len(buf)))
+ if r != -1 {
+ t.Errorf("read()=%d, want -1", r)
+ }
+ w := Write(^uintptr(0), unsafe.Pointer(&buf[0]), int32(len(buf)))
+ if w != -1 {
+ t.Errorf("write()=%d, want -1", w)
+ }
+ c := Close(-1)
+ if c != -1 {
+ t.Errorf("close()=%d, want -1", c)
+ }
+}
+
+func TestAppendGrowth(t *testing.T) {
+ var x []int64
+ check := func(want int) {
+ if cap(x) != want {
+ t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
+ }
+ }
+
+ check(0)
+ want := 1
+ for i := 1; i <= 100; i++ {
+ x = append(x, 1)
+ check(want)
+ if i&(i-1) == 0 {
+ want = 2 * i
+ }
+ }
+}
+
+var One = []int64{1}
+
+func TestAppendSliceGrowth(t *testing.T) {
+ var x []int64
+ check := func(want int) {
+ if cap(x) != want {
+ t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
+ }
+ }
+
+ check(0)
+ want := 1
+ for i := 1; i <= 100; i++ {
+ x = append(x, One...)
+ check(want)
+ if i&(i-1) == 0 {
+ want = 2 * i
+ }
+ }
+}
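TestAppendGrowth pins down append's doubling policy for []int64, and the i&(i-1) == 0 expression in it is the usual bit trick for "i is a power of two". A small observational program in the same spirit (actual growth steps can differ across Go versions and element types):

package main

import "fmt"

func main() {
	var x []int64
	prevCap := cap(x)
	for i := 1; i <= 64; i++ {
		x = append(x, 1)
		if c := cap(x); c != prevCap {
			fmt.Printf("after %2d appends: cap %2d -> %2d\n", len(x), prevCap, c)
			prevCap = c
		}
	}
}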
diff --git a/libgo/go/runtime/runtime_unix_test.go b/libgo/go/runtime/runtime_unix_test.go
index 963de8c..cfec332 100644
--- a/libgo/go/runtime/runtime_unix_test.go
+++ b/libgo/go/runtime/runtime_unix_test.go
@@ -42,7 +42,7 @@ func TestGoroutineProfile(t *testing.T) {
if testing.Short() {
max = 100
}
- stk := make([]runtime.StackRecord, 100)
+ stk := make([]runtime.StackRecord, 128)
for n := 0; n < max; n++ {
_, ok := runtime.GoroutineProfile(stk)
if !ok {
diff --git a/libgo/go/runtime/select.go b/libgo/go/runtime/select.go
deleted file mode 100644
index f735a71..0000000
--- a/libgo/go/runtime/select.go
+++ /dev/null
@@ -1,651 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-// This file contains the implementation of Go select statements.
-
-import "unsafe"
-
-const (
- debugSelect = false
-)
-
-var (
- chansendpc = funcPC(chansend)
- chanrecvpc = funcPC(chanrecv)
-)
-
-func selectsize(size uintptr) uintptr {
- selsize := unsafe.Sizeof(_select{}) +
- (size-1)*unsafe.Sizeof(_select{}.scase[0]) +
- size*unsafe.Sizeof(*_select{}.lockorder) +
- size*unsafe.Sizeof(*_select{}.pollorder)
- return round(selsize, _Int64Align)
-}
-
-func newselect(sel *_select, selsize int64, size int32) {
- if selsize != int64(selectsize(uintptr(size))) {
- print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
- gothrow("bad select size")
- }
- sel.tcase = uint16(size)
- sel.ncase = 0
- sel.lockorder = (**hchan)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(_select{}.scase[0])))
- sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*_select{}.lockorder)))
-
- if debugSelect {
- print("newselect s=", sel, " size=", size, "\n")
- }
-}
-
-//go:nosplit
-func selectsend(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) {
- // nil cases do not compete
- if c != nil {
- selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
- }
- return
-}
-
-// cut in half to give stack a chance to split
-func selectsendImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
- i := sel.ncase
- if i >= sel.tcase {
- gothrow("selectsend: too many cases")
- }
- sel.ncase = i + 1
- cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
-
- cas.pc = pc
- cas._chan = c
- cas.so = uint16(so)
- cas.kind = _CaseSend
- cas.elem = elem
-
- if debugSelect {
- print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n")
- }
-}
-
-//go:nosplit
-func selectrecv(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) {
- // nil cases do not compete
- if c != nil {
- selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
- }
- return
-}
-
-//go:nosplit
-func selectrecv2(sel *_select, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) {
- // nil cases do not compete
- if c != nil {
- selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
- }
- return
-}
-
-func selectrecvImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
- i := sel.ncase
- if i >= sel.tcase {
- gothrow("selectrecv: too many cases")
- }
- sel.ncase = i + 1
- cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
- cas.pc = pc
- cas._chan = c
- cas.so = uint16(so)
- cas.kind = _CaseRecv
- cas.elem = elem
- cas.receivedp = received
-
- if debugSelect {
- print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n")
- }
-}
-
-//go:nosplit
-func selectdefault(sel *_select) (selected bool) {
- selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
- return
-}
-
-func selectdefaultImpl(sel *_select, callerpc uintptr, so uintptr) {
- i := sel.ncase
- if i >= sel.tcase {
- gothrow("selectdefault: too many cases")
- }
- sel.ncase = i + 1
- cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
- cas.pc = callerpc
- cas._chan = nil
- cas.so = uint16(so)
- cas.kind = _CaseDefault
-
- if debugSelect {
- print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n")
- }
-}
-
-func sellock(sel *_select) {
- lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
- lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
- var c *hchan
- for _, c0 := range lockorder {
- if c0 != nil && c0 != c {
- c = c0
- lock(&c.lock)
- }
- }
-}
-
-func selunlock(sel *_select) {
- // We must be very careful here to not touch sel after we have unlocked
- // the last lock, because sel can be freed right after the last unlock.
- // Consider the following situation.
- // First M calls runtime·park() in runtime·selectgo() passing the sel.
- // Once runtime·park() has unlocked the last lock, another M makes
- // the G that calls select runnable again and schedules it for execution.
- // When the G runs on another M, it locks all the locks and frees sel.
- // Now if the first M touches sel, it will access freed memory.
- n := int(sel.ncase)
- r := 0
- lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), n, n}
- lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
- // skip the default case
- if n > 0 && lockorder[0] == nil {
- r = 1
- }
- for i := n - 1; i >= r; i-- {
- c := lockorder[i]
- if i > 0 && c == lockorder[i-1] {
- continue // will unlock it on the next iteration
- }
- unlock(&c.lock)
- }
-}
-
-func selparkcommit(gp *g, sel *_select) bool {
- selunlock(sel)
- return true
-}
-
-func block() {
- gopark(nil, nil, "select (no cases)") // forever
-}
-
-// overwrites return pc on stack to signal which case of the select
-// to run, so cannot appear at the top of a split stack.
-//go:nosplit
-func selectgo(sel *_select) {
- pc, offset := selectgoImpl(sel)
- *(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true
- setcallerpc(unsafe.Pointer(&sel), pc)
-}
-
-// selectgoImpl returns scase.pc and scase.so for the select
-// case which fired.
-func selectgoImpl(sel *_select) (uintptr, uint16) {
- if debugSelect {
- print("select: sel=", sel, "\n")
- }
-
- scaseslice := sliceStruct{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
- scases := *(*[]scase)(unsafe.Pointer(&scaseslice))
-
- var t0 int64
- if blockprofilerate > 0 {
- t0 = cputicks()
- for i := 0; i < int(sel.ncase); i++ {
- scases[i].releasetime = -1
- }
- }
-
- // The compiler rewrites selects that statically have
- // only 0 or 1 cases plus default into simpler constructs.
- // The only way we can end up with such small sel.ncase
- // values here is for a larger select in which most channels
- // have been nilled out. The general code handles those
- // cases correctly, and they are rare enough not to bother
- // optimizing (and needing to test).
-
- // generate permuted order
- pollslice := sliceStruct{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
- pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
- for i := 0; i < int(sel.ncase); i++ {
- pollorder[i] = uint16(i)
- }
- for i := 1; i < int(sel.ncase); i++ {
- o := pollorder[i]
- j := int(fastrand1()) % (i + 1)
- pollorder[i] = pollorder[j]
- pollorder[j] = o
- }
-
- // sort the cases by Hchan address to get the locking order.
- // simple heap sort, to guarantee n log n time and constant stack footprint.
- lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
- lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
- for i := 0; i < int(sel.ncase); i++ {
- j := i
- c := scases[j]._chan
- for j > 0 && lockorder[(j-1)/2].sortkey() < c.sortkey() {
- k := (j - 1) / 2
- lockorder[j] = lockorder[k]
- j = k
- }
- lockorder[j] = c
- }
- for i := int(sel.ncase) - 1; i >= 0; i-- {
- c := lockorder[i]
- lockorder[i] = lockorder[0]
- j := 0
- for {
- k := j*2 + 1
- if k >= i {
- break
- }
- if k+1 < i && lockorder[k].sortkey() < lockorder[k+1].sortkey() {
- k++
- }
- if c.sortkey() < lockorder[k].sortkey() {
- lockorder[j] = lockorder[k]
- j = k
- continue
- }
- break
- }
- lockorder[j] = c
- }
- /*
- for i := 0; i+1 < int(sel.ncase); i++ {
- if lockorder[i].sortkey() > lockorder[i+1].sortkey() {
- print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
- gothrow("select: broken sort")
- }
- }
- */
-
- // lock all the channels involved in the select
- sellock(sel)
-
- var (
- gp *g
- done uint32
- sg *sudog
- c *hchan
- k *scase
- sglist *sudog
- sgnext *sudog
- )
-
-loop:
- // pass 1 - look for something already waiting
- var dfl *scase
- var cas *scase
- for i := 0; i < int(sel.ncase); i++ {
- cas = &scases[pollorder[i]]
- c = cas._chan
-
- switch cas.kind {
- case _CaseRecv:
- if c.dataqsiz > 0 {
- if c.qcount > 0 {
- goto asyncrecv
- }
- } else {
- sg = c.sendq.dequeue()
- if sg != nil {
- goto syncrecv
- }
- }
- if c.closed != 0 {
- goto rclose
- }
-
- case _CaseSend:
- if raceenabled {
- racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
- }
- if c.closed != 0 {
- goto sclose
- }
- if c.dataqsiz > 0 {
- if c.qcount < c.dataqsiz {
- goto asyncsend
- }
- } else {
- sg = c.recvq.dequeue()
- if sg != nil {
- goto syncsend
- }
- }
-
- case _CaseDefault:
- dfl = cas
- }
- }
-
- if dfl != nil {
- selunlock(sel)
- cas = dfl
- goto retc
- }
-
- // pass 2 - enqueue on all chans
- gp = getg()
- done = 0
- for i := 0; i < int(sel.ncase); i++ {
- cas = &scases[pollorder[i]]
- c = cas._chan
- sg := acquireSudog()
- sg.g = gp
- // Note: selectdone is adjusted for stack copies in stack.c:adjustsudogs
- sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
- sg.elem = cas.elem
- sg.releasetime = 0
- if t0 != 0 {
- sg.releasetime = -1
- }
- sg.waitlink = gp.waiting
- gp.waiting = sg
-
- switch cas.kind {
- case _CaseRecv:
- c.recvq.enqueue(sg)
-
- case _CaseSend:
- c.sendq.enqueue(sg)
- }
- }
-
- // wait for someone to wake us up
- gp.param = nil
- gopark(unsafe.Pointer(funcPC(selparkcommit)), unsafe.Pointer(sel), "select")
-
- // someone woke us up
- sellock(sel)
- sg = (*sudog)(gp.param)
- gp.param = nil
-
- // pass 3 - dequeue from unsuccessful chans
- // otherwise they stack up on quiet channels
- // record the successful case, if any.
- // We singly-linked up the SudoGs in case order, so when
- // iterating through the linked list they are in reverse order.
- cas = nil
- sglist = gp.waiting
- // Clear all selectdone and elem before unlinking from gp.waiting.
- // They must be cleared before being put back into the sudog cache.
- // Clear before unlinking, because if a stack copy happens after the unlink,
- // they will not be updated, they will be left pointing to the old stack,
- // which creates dangling pointers, which may be detected by the
- // garbage collector.
- for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
- sg1.selectdone = nil
- sg1.elem = nil
- }
- gp.waiting = nil
- for i := int(sel.ncase) - 1; i >= 0; i-- {
- k = &scases[pollorder[i]]
- if sglist.releasetime > 0 {
- k.releasetime = sglist.releasetime
- }
- if sg == sglist {
- cas = k
- } else {
- c = k._chan
- if k.kind == _CaseSend {
- c.sendq.dequeueSudoG(sglist)
- } else {
- c.recvq.dequeueSudoG(sglist)
- }
- }
- sgnext = sglist.waitlink
- sglist.waitlink = nil
- releaseSudog(sglist)
- sglist = sgnext
- }
-
- if cas == nil {
- goto loop
- }
-
- c = cas._chan
-
- if c.dataqsiz > 0 {
- gothrow("selectgo: shouldn't happen")
- }
-
- if debugSelect {
- print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
- }
-
- if cas.kind == _CaseRecv {
- if cas.receivedp != nil {
- *cas.receivedp = true
- }
- }
-
- if raceenabled {
- if cas.kind == _CaseRecv && cas.elem != nil {
- raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
- } else if cas.kind == _CaseSend {
- raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
- }
- }
-
- selunlock(sel)
- goto retc
-
-asyncrecv:
- // can receive from buffer
- if raceenabled {
- if cas.elem != nil {
- raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
- }
- raceacquire(chanbuf(c, c.recvx))
- racerelease(chanbuf(c, c.recvx))
- }
- if cas.receivedp != nil {
- *cas.receivedp = true
- }
- if cas.elem != nil {
- memmove(cas.elem, chanbuf(c, c.recvx), uintptr(c.elemsize))
- }
- memclr(chanbuf(c, c.recvx), uintptr(c.elemsize))
- c.recvx++
- if c.recvx == c.dataqsiz {
- c.recvx = 0
- }
- c.qcount--
- sg = c.sendq.dequeue()
- if sg != nil {
- gp = sg.g
- selunlock(sel)
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp)
- } else {
- selunlock(sel)
- }
- goto retc
-
-asyncsend:
- // can send to buffer
- if raceenabled {
- raceacquire(chanbuf(c, c.sendx))
- racerelease(chanbuf(c, c.sendx))
- raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
- }
- memmove(chanbuf(c, c.sendx), cas.elem, uintptr(c.elemsize))
- c.sendx++
- if c.sendx == c.dataqsiz {
- c.sendx = 0
- }
- c.qcount++
- sg = c.recvq.dequeue()
- if sg != nil {
- gp = sg.g
- selunlock(sel)
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp)
- } else {
- selunlock(sel)
- }
- goto retc
-
-syncrecv:
- // can receive from sleeping sender (sg)
- if raceenabled {
- if cas.elem != nil {
- raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
- }
- racesync(c, sg)
- }
- selunlock(sel)
- if debugSelect {
- print("syncrecv: sel=", sel, " c=", c, "\n")
- }
- if cas.receivedp != nil {
- *cas.receivedp = true
- }
- if cas.elem != nil {
- memmove(cas.elem, sg.elem, uintptr(c.elemsize))
- }
- sg.elem = nil
- gp = sg.g
- gp.param = unsafe.Pointer(sg)
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp)
- goto retc
-
-rclose:
- // read at end of closed channel
- selunlock(sel)
- if cas.receivedp != nil {
- *cas.receivedp = false
- }
- if cas.elem != nil {
- memclr(cas.elem, uintptr(c.elemsize))
- }
- if raceenabled {
- raceacquire(unsafe.Pointer(c))
- }
- goto retc
-
-syncsend:
- // can send to sleeping receiver (sg)
- if raceenabled {
- raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
- racesync(c, sg)
- }
- selunlock(sel)
- if debugSelect {
- print("syncsend: sel=", sel, " c=", c, "\n")
- }
- if sg.elem != nil {
- memmove(sg.elem, cas.elem, uintptr(c.elemsize))
- }
- sg.elem = nil
- gp = sg.g
- gp.param = unsafe.Pointer(sg)
- if sg.releasetime != 0 {
- sg.releasetime = cputicks()
- }
- goready(gp)
-
-retc:
- if cas.releasetime > 0 {
- blockevent(cas.releasetime-t0, 2)
- }
- return cas.pc, cas.so
-
-sclose:
- // send on closed channel
- selunlock(sel)
- panic("send on closed channel")
-}
-
-func (c *hchan) sortkey() uintptr {
- // TODO(khr): if we have a moving garbage collector, we'll need to
- // change this function.
- return uintptr(unsafe.Pointer(c))
-}
-
-// A runtimeSelect is a single case passed to rselect.
-// This must match ../reflect/value.go:/runtimeSelect
-type runtimeSelect struct {
- dir selectDir
- typ unsafe.Pointer // channel type (not used here)
- ch *hchan // channel
- val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
-}
-
-// These values must match ../reflect/value.go:/SelectDir.
-type selectDir int
-
-const (
- _ selectDir = iota
- selectSend // case Chan <- Send
- selectRecv // case <-Chan:
- selectDefault // default
-)
-
-func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
- // flagNoScan is safe here, because all objects are also referenced from cases.
- size := selectsize(uintptr(len(cases)))
- sel := (*_select)(mallocgc(size, nil, flagNoScan))
- newselect(sel, int64(size), int32(len(cases)))
- r := new(bool)
- for i := range cases {
- rc := &cases[i]
- switch rc.dir {
- case selectDefault:
- selectdefaultImpl(sel, uintptr(i), 0)
- case selectSend:
- if rc.ch == nil {
- break
- }
- selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
- case selectRecv:
- if rc.ch == nil {
- break
- }
- selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
- }
- }
-
- pc, _ := selectgoImpl(sel)
- chosen = int(pc)
- recvOK = *r
- return
-}
-
-func (q *waitq) dequeueSudoG(s *sudog) {
- var prevsgp *sudog
- l := &q.first
- for {
- sgp := *l
- if sgp == nil {
- return
- }
- if sgp == s {
- *l = sgp.next
- if q.last == sgp {
- q.last = prevsgp
- }
- s.next = nil
- return
- }
- l = &sgp.next
- prevsgp = sgp
- }
-}
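selectgoImpl shuffles pollorder so that no ready case is systematically preferred; the loop is an inside-out Fisher-Yates shuffle driven by fastrand1. The same shuffle in ordinary Go, using math/rand in place of the runtime's generator (a sketch, not the runtime code):

package main

import (
	"fmt"
	"math/rand"
)

// pollOrder returns a random permutation of case indices, the way the
// runtime builds pollorder before scanning the cases of a select.
func pollOrder(ncase int) []uint16 {
	order := make([]uint16, ncase)
	for i := range order {
		order[i] = uint16(i)
	}
	for i := 1; i < ncase; i++ {
		j := rand.Intn(i + 1) // fastrand1() % (i+1) in the runtime
		order[i], order[j] = order[j], order[i]
	}
	return order
}

func main() {
	fmt.Println("one possible poll order:", pollOrder(5))
}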
diff --git a/libgo/go/runtime/sema.go b/libgo/go/runtime/sema.go
deleted file mode 100644
index 26dbd30..0000000
--- a/libgo/go/runtime/sema.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Semaphore implementation exposed to Go.
-// Intended use is to provide a sleep and wakeup
-// primitive that can be used in the contended case
-// of other synchronization primitives.
-// Thus it targets the same goal as Linux's futex,
-// but it has much simpler semantics.
-//
-// That is, don't think of these as semaphores.
-// Think of them as a way to implement sleep and wakeup
-// such that every sleep is paired with a single wakeup,
-// even if, due to races, the wakeup happens before the sleep.
-//
-// See Mullender and Cox, ``Semaphores in Plan 9,''
-// http://swtch.com/semaphore.pdf
-
-package runtime
-
-import "unsafe"
-
-// Asynchronous semaphore for sync.Mutex.
-
-type semaRoot struct {
- lock mutex
- head *sudog
- tail *sudog
- nwait uint32 // Number of waiters. Read w/o the lock.
-}
-
-// Prime to not correlate with any user patterns.
-const semTabSize = 251
-
-var semtable [semTabSize]struct {
- root semaRoot
- pad [_CacheLineSize - unsafe.Sizeof(semaRoot{})]byte
-}
-
-// Called from sync/net packages.
-func asyncsemacquire(addr *uint32) {
- semacquire(addr, true)
-}
-
-func asyncsemrelease(addr *uint32) {
- semrelease(addr)
-}
-
-// Called from runtime.
-func semacquire(addr *uint32, profile bool) {
- gp := getg()
- if gp != gp.m.curg {
- gothrow("semacquire not on the G stack")
- }
-
- // Easy case.
- if cansemacquire(addr) {
- return
- }
-
- // Harder case:
- // increment waiter count
- // try cansemacquire one more time, return if succeeded
- // enqueue itself as a waiter
- // sleep
- // (waiter descriptor is dequeued by signaler)
- s := acquireSudog()
- root := semroot(addr)
- t0 := int64(0)
- s.releasetime = 0
- if profile && blockprofilerate > 0 {
- t0 = cputicks()
- s.releasetime = -1
- }
- for {
- lock(&root.lock)
- // Add ourselves to nwait to disable "easy case" in semrelease.
- xadd(&root.nwait, 1)
- // Check cansemacquire to avoid missed wakeup.
- if cansemacquire(addr) {
- xadd(&root.nwait, -1)
- unlock(&root.lock)
- break
- }
- // Any semrelease after the cansemacquire knows we're waiting
- // (we set nwait above), so go to sleep.
- root.queue(addr, s)
- goparkunlock(&root.lock, "semacquire")
- if cansemacquire(addr) {
- break
- }
- }
- if s.releasetime > 0 {
- blockevent(int64(s.releasetime)-t0, 3)
- }
- releaseSudog(s)
-}
-
-func semrelease(addr *uint32) {
- root := semroot(addr)
- xadd(addr, 1)
-
- // Easy case: no waiters?
- // This check must happen after the xadd, to avoid a missed wakeup
- // (see loop in semacquire).
- if atomicload(&root.nwait) == 0 {
- return
- }
-
- // Harder case: search for a waiter and wake it.
- lock(&root.lock)
- if atomicload(&root.nwait) == 0 {
- // The count is already consumed by another goroutine,
- // so no need to wake up another goroutine.
- unlock(&root.lock)
- return
- }
- s := root.head
- for ; s != nil; s = s.next {
- if s.elem == unsafe.Pointer(addr) {
- xadd(&root.nwait, -1)
- root.dequeue(s)
- break
- }
- }
- unlock(&root.lock)
- if s != nil {
- if s.releasetime != 0 {
- s.releasetime = cputicks()
- }
- goready(s.g)
- }
-}
-
-func semroot(addr *uint32) *semaRoot {
- return &semtable[(uintptr(unsafe.Pointer(addr))>>3)%semTabSize].root
-}
-
-func cansemacquire(addr *uint32) bool {
- for {
- v := atomicload(addr)
- if v == 0 {
- return false
- }
- if cas(addr, v, v-1) {
- return true
- }
- }
-}
-
-func (root *semaRoot) queue(addr *uint32, s *sudog) {
- s.g = getg()
- s.elem = unsafe.Pointer(addr)
- s.next = nil
- s.prev = root.tail
- if root.tail != nil {
- root.tail.next = s
- } else {
- root.head = s
- }
- root.tail = s
-}
-
-func (root *semaRoot) dequeue(s *sudog) {
- if s.next != nil {
- s.next.prev = s.prev
- } else {
- root.tail = s.prev
- }
- if s.prev != nil {
- s.prev.next = s.next
- } else {
- root.head = s.next
- }
- s.elem = nil
- s.next = nil
- s.prev = nil
-}
-
-// Synchronous semaphore for sync.Cond.
-type syncSema struct {
- lock mutex
- head *sudog
- tail *sudog
-}
-
-// Syncsemacquire waits for a pairing syncsemrelease on the same semaphore s.
-func syncsemacquire(s *syncSema) {
- lock(&s.lock)
- if s.head != nil && s.head.nrelease > 0 {
- // Have pending release, consume it.
- var wake *sudog
- s.head.nrelease--
- if s.head.nrelease == 0 {
- wake = s.head
- s.head = wake.next
- if s.head == nil {
- s.tail = nil
- }
- }
- unlock(&s.lock)
- if wake != nil {
- wake.next = nil
- goready(wake.g)
- }
- } else {
- // Enqueue itself.
- w := acquireSudog()
- w.g = getg()
- w.nrelease = -1
- w.next = nil
- w.releasetime = 0
- t0 := int64(0)
- if blockprofilerate > 0 {
- t0 = cputicks()
- w.releasetime = -1
- }
- if s.tail == nil {
- s.head = w
- } else {
- s.tail.next = w
- }
- s.tail = w
- goparkunlock(&s.lock, "semacquire")
- if t0 != 0 {
- blockevent(int64(w.releasetime)-t0, 2)
- }
- releaseSudog(w)
- }
-}
-
-// Syncsemrelease waits for n pairing syncsemacquire on the same semaphore s.
-func syncsemrelease(s *syncSema, n uint32) {
- lock(&s.lock)
- for n > 0 && s.head != nil && s.head.nrelease < 0 {
- // Have pending acquire, satisfy it.
- wake := s.head
- s.head = wake.next
- if s.head == nil {
- s.tail = nil
- }
- if wake.releasetime != 0 {
- wake.releasetime = cputicks()
- }
- wake.next = nil
- goready(wake.g)
- n--
- }
- if n > 0 {
- // enqueue itself
- w := acquireSudog()
- w.g = getg()
- w.nrelease = int32(n)
- w.next = nil
- w.releasetime = 0
- if s.tail == nil {
- s.head = w
- } else {
- s.tail.next = w
- }
- s.tail = w
- goparkunlock(&s.lock, "semarelease")
- releaseSudog(w)
- } else {
- unlock(&s.lock)
- }
-}
-
-func syncsemcheck(sz uintptr) {
- if sz != unsafe.Sizeof(syncSema{}) {
- print("runtime: bad syncSema size - sync=", sz, " runtime=", unsafe.Sizeof(syncSema{}), "\n")
- gothrow("bad syncSema size")
- }
-}
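cansemacquire is the lock-free fast path of the semaphore: load the counter, bail out at zero, otherwise CAS it down by one and retry only when the CAS loses a race. A user-level sketch of that fast path with sync/atomic (the slow path that queues and parks the goroutine is omitted):

package main

import (
	"fmt"
	"sync/atomic"
)

// tryAcquire takes one unit of a counted semaphore without blocking.
func tryAcquire(sema *uint32) bool {
	for {
		v := atomic.LoadUint32(sema)
		if v == 0 {
			return false // nothing to take; a real caller would queue and sleep
		}
		if atomic.CompareAndSwapUint32(sema, v, v-1) {
			return true
		}
	}
}

func main() {
	sema := uint32(1)
	fmt.Println("first acquire: ", tryAcquire(&sema)) // true
	fmt.Println("second acquire:", tryAcquire(&sema)) // false, would block
	atomic.AddUint32(&sema, 1)                        // semrelease's xadd
	fmt.Println("after release: ", tryAcquire(&sema)) // true
}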
diff --git a/libgo/go/runtime/signal_unix.go b/libgo/go/runtime/signal_unix.go
deleted file mode 100644
index ba77b6e..0000000
--- a/libgo/go/runtime/signal_unix.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
-
-package runtime
-
-func sigpipe()
-
-func os_sigpipe() {
- onM(sigpipe)
-}
diff --git a/libgo/go/runtime/sigpanic_unix.go b/libgo/go/runtime/sigpanic_unix.go
deleted file mode 100644
index 6807985..0000000
--- a/libgo/go/runtime/sigpanic_unix.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
-
-package runtime
-
-func signame(int32) *byte
-
-func sigpanic() {
- g := getg()
- if !canpanic(g) {
- gothrow("unexpected signal during runtime execution")
- }
-
- switch g.sig {
- case _SIGBUS:
- if g.sigcode0 == _BUS_ADRERR && g.sigcode1 < 0x1000 || g.paniconfault {
- panicmem()
- }
- print("unexpected fault address ", hex(g.sigcode1), "\n")
- gothrow("fault")
- case _SIGSEGV:
- if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 || g.paniconfault {
- panicmem()
- }
- print("unexpected fault address ", hex(g.sigcode1), "\n")
- gothrow("fault")
- case _SIGFPE:
- switch g.sigcode0 {
- case _FPE_INTDIV:
- panicdivide()
- case _FPE_INTOVF:
- panicoverflow()
- }
- panicfloat()
- }
- panic(errorString(gostringnocopy(signame(g.sig))))
-}
diff --git a/libgo/go/runtime/sigqueue.go b/libgo/go/runtime/sigqueue.go
deleted file mode 100644
index fed4560..0000000
--- a/libgo/go/runtime/sigqueue.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements runtime support for signal handling.
-//
-// Most synchronization primitives are not available from
-// the signal handler (it cannot block, allocate memory, or use locks)
-// so the handler communicates with a processing goroutine
-// via struct sig, below.
-//
-// sigsend is called by the signal handler to queue a new signal.
-// signal_recv is called by the Go program to receive a newly queued signal.
-// Synchronization between sigsend and signal_recv is based on the sig.state
-// variable. It can be in 3 states: sigIdle, sigReceiving and sigSending.
-// sigReceiving means that signal_recv is blocked on sig.Note and there are no
-// new pending signals.
-// sigSending means that sig.mask *may* contain new pending signals,
-// signal_recv can't be blocked in this state.
-// sigIdle means that there are no new pending signals and signal_recv is not blocked.
-// Transitions between states are done atomically with CAS.
-// When signal_recv is unblocked, it resets sig.Note and rechecks sig.mask.
-// If several sigsends and signal_recv execute concurrently, it can lead to
-// unnecessary rechecks of sig.mask, but it cannot lead to missed signals
-// nor deadlocks.
-
-package runtime
-
-import "unsafe"
-
-var sig struct {
- note note
- mask [(_NSIG + 31) / 32]uint32
- wanted [(_NSIG + 31) / 32]uint32
- recv [(_NSIG + 31) / 32]uint32
- state uint32
- inuse bool
-}
-
-const (
- sigIdle = iota
- sigReceiving
- sigSending
-)
-
-// Called from sighandler to send a signal back out of the signal handling thread.
-// Reports whether the signal was sent. If not, the caller typically crashes the program.
-func sigsend(s int32) bool {
- bit := uint32(1) << uint(s&31)
- if !sig.inuse || s < 0 || int(s) >= 32*len(sig.wanted) || sig.wanted[s/32]&bit == 0 {
- return false
- }
-
- // Add signal to outgoing queue.
- for {
- mask := sig.mask[s/32]
- if mask&bit != 0 {
- return true // signal already in queue
- }
- if cas(&sig.mask[s/32], mask, mask|bit) {
- break
- }
- }
-
- // Notify receiver that queue has new bit.
-Send:
- for {
- switch atomicload(&sig.state) {
- default:
- gothrow("sigsend: inconsistent state")
- case sigIdle:
- if cas(&sig.state, sigIdle, sigSending) {
- break Send
- }
- case sigSending:
- // notification already pending
- break Send
- case sigReceiving:
- if cas(&sig.state, sigReceiving, sigIdle) {
- notewakeup(&sig.note)
- break Send
- }
- }
- }
-
- return true
-}
-
-// Called to receive the next queued signal.
-// Must only be called from a single goroutine at a time.
-func signal_recv() uint32 {
- for {
- // Serve any signals from local copy.
- for i := uint32(0); i < _NSIG; i++ {
- if sig.recv[i/32]&(1<<(i&31)) != 0 {
- sig.recv[i/32] &^= 1 << (i & 31)
- return i
- }
- }
-
- // Wait for updates to be available from signal sender.
- Receive:
- for {
- switch atomicload(&sig.state) {
- default:
- gothrow("signal_recv: inconsistent state")
- case sigIdle:
- if cas(&sig.state, sigIdle, sigReceiving) {
- notetsleepg(&sig.note, -1)
- noteclear(&sig.note)
- break Receive
- }
- case sigSending:
- if cas(&sig.state, sigSending, sigIdle) {
- break Receive
- }
- }
- }
-
- // Incorporate updates from sender into local copy.
- for i := range sig.mask {
- sig.recv[i] = xchg(&sig.mask[i], 0)
- }
- }
-}
-
-// Must only be called from a single goroutine at a time.
-func signal_enable(s uint32) {
- if !sig.inuse {
- // The first call to signal_enable is for us
- // to use for initialization. It does not pass
- // signal information in m.
- sig.inuse = true // enable reception of signals; cannot disable
- noteclear(&sig.note)
- return
- }
-
- if int(s) >= len(sig.wanted)*32 {
- return
- }
- sig.wanted[s/32] |= 1 << (s & 31)
- sigenable_go(s)
-}
-
-// Must only be called from a single goroutine at a time.
-func signal_disable(s uint32) {
- if int(s) >= len(sig.wanted)*32 {
- return
- }
- sig.wanted[s/32] &^= 1 << (s & 31)
- sigdisable_go(s)
-}
-
-// This runs on a foreign stack, without an m or a g. No stack split.
-//go:nosplit
-func badsignal(sig uintptr) {
- // Some external libraries, for example, OpenBLAS, create worker threads in
- // a global constructor. If we're doing cpu profiling, and the SIGPROF signal
- // comes to one of the foreign threads before we make our first cgo call, the
- // call to cgocallback below will bring down the whole process.
- // It's better to miss a few SIGPROF signals than to abort in this case.
- // See http://golang.org/issue/9456.
- if _SIGPROF != 0 && sig == _SIGPROF && needextram != 0 {
- return
- }
- cgocallback(unsafe.Pointer(funcPC(sigsend)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
-}
-
-func sigenable_m()
-func sigdisable_m()
-
-func sigenable_go(s uint32) {
- g := getg()
- g.m.scalararg[0] = uintptr(s)
- onM(sigenable_m)
-}
-
-func sigdisable_go(s uint32) {
- g := getg()
- g.m.scalararg[0] = uintptr(s)
- onM(sigdisable_m)
-}
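sigsend and signal_recv are the plumbing beneath the public os/signal package: the handler sets a bit in sig.mask, and a dedicated goroutine pulls queued signals out and delivers them to channels registered with Notify. For reference, typical use of that public API looks like this (a Unix-flavoured example):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)

	fmt.Println("waiting for SIGINT or SIGTERM (press Ctrl-C)...")
	sig := <-ch
	fmt.Println("got signal:", sig)
}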
diff --git a/libgo/go/runtime/slice.go b/libgo/go/runtime/slice.go
deleted file mode 100644
index 171087d..0000000
--- a/libgo/go/runtime/slice.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-type sliceStruct struct {
- array unsafe.Pointer
- len int
- cap int
-}
-
-// TODO: take uintptrs instead of int64s?
-func makeslice(t *slicetype, len64 int64, cap64 int64) sliceStruct {
- // NOTE: The len > MaxMem/elemsize check here is not strictly necessary,
- // but it produces a 'len out of range' error instead of a 'cap out of range' error
- // when someone does make([]T, bignumber). 'cap out of range' is true too,
- // but since the cap is only being supplied implicitly, saying len is clearer.
- // See issue 4085.
- len := int(len64)
- if len64 < 0 || int64(len) != len64 || t.elem.size > 0 && uintptr(len) > maxmem/uintptr(t.elem.size) {
- panic(errorString("makeslice: len out of range"))
- }
- cap := int(cap64)
- if cap < len || int64(cap) != cap64 || t.elem.size > 0 && uintptr(cap) > maxmem/uintptr(t.elem.size) {
- panic(errorString("makeslice: cap out of range"))
- }
- p := newarray(t.elem, uintptr(cap))
- return sliceStruct{p, len, cap}
-}
-
-// TODO: take uintptr instead of int64?
-func growslice(t *slicetype, old sliceStruct, n int64) sliceStruct {
- if n < 1 {
- panic(errorString("growslice: invalid n"))
- }
-
- cap64 := int64(old.cap) + n
- cap := int(cap64)
-
- if int64(cap) != cap64 || cap < old.cap || t.elem.size > 0 && uintptr(cap) > maxmem/uintptr(t.elem.size) {
- panic(errorString("growslice: cap out of range"))
- }
-
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadrangepc(old.array, uintptr(old.len*int(t.elem.size)), callerpc, funcPC(growslice))
- }
-
- et := t.elem
- if et.size == 0 {
- return sliceStruct{old.array, old.len, cap}
- }
-
- newcap := old.cap
- if newcap+newcap < cap {
- newcap = cap
- } else {
- for {
- if old.len < 1024 {
- newcap += newcap
- } else {
- newcap += newcap / 4
- }
- if newcap >= cap {
- break
- }
- }
- }
-
- if uintptr(newcap) >= maxmem/uintptr(et.size) {
- panic(errorString("growslice: cap out of range"))
- }
- lenmem := uintptr(old.len) * uintptr(et.size)
- capmem := goroundupsize(uintptr(newcap) * uintptr(et.size))
- newcap = int(capmem / uintptr(et.size))
- var p unsafe.Pointer
- if et.kind&kindNoPointers != 0 {
- p = rawmem(capmem)
- memclr(add(p, lenmem), capmem-lenmem)
- } else {
-		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory
- p = newarray(et, uintptr(newcap))
- }
- memmove(p, old.array, lenmem)
-
- return sliceStruct{p, old.len, newcap}
-}
-
-func slicecopy(to sliceStruct, fm sliceStruct, width uintptr) int {
- if fm.len == 0 || to.len == 0 || width == 0 {
- return 0
- }
-
- n := fm.len
- if to.len < n {
- n = to.len
- }
-
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&to))
- pc := funcPC(slicecopy)
- racewriterangepc(to.array, uintptr(n*int(width)), callerpc, pc)
- racereadrangepc(fm.array, uintptr(n*int(width)), callerpc, pc)
- }
-
- size := uintptr(n) * width
- if size == 1 { // common case worth about 2x to do here
- // TODO: is this still worth it with new memmove impl?
- *(*byte)(to.array) = *(*byte)(fm.array) // known to be a byte pointer
- } else {
- memmove(to.array, fm.array, size)
- }
- return int(n)
-}
-
-func slicestringcopy(to []byte, fm string) int {
- if len(fm) == 0 || len(to) == 0 {
- return 0
- }
-
- n := len(fm)
- if len(to) < n {
- n = len(to)
- }
-
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&to))
- pc := funcPC(slicestringcopy)
- racewriterangepc(unsafe.Pointer(&to[0]), uintptr(n), callerpc, pc)
- }
-
- memmove(unsafe.Pointer(&to[0]), unsafe.Pointer((*stringStruct)(unsafe.Pointer(&fm)).str), uintptr(n))
- return n
-}
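growslice grows small slices by doubling and slices of 1024 elements or more by 25%, before rounding the byte size up to a size class. The helper below transcribes just that capacity policy from the code above (ignoring the size-class round-up), as a standalone illustration:

package main

import "fmt"

// nextCap mirrors growslice's policy: if doubling would still fall
// short, jump straight to the requested capacity; otherwise double
// small slices and grow large ones by a quarter until it fits.
func nextCap(oldLen, oldCap, needed int) int {
	newcap := oldCap
	if newcap+newcap < needed {
		return needed
	}
	for newcap < needed {
		if oldLen < 1024 {
			newcap += newcap
		} else {
			newcap += newcap / 4
		}
	}
	return newcap
}

func main() {
	fmt.Println(nextCap(4, 4, 5))          // 8: small slices double
	fmt.Println(nextCap(2000, 2000, 2001)) // 2500: large slices grow by 1/4
}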
diff --git a/libgo/go/runtime/softfloat64.go b/libgo/go/runtime/softfloat64.go
deleted file mode 100644
index 4fcf8f2..0000000
--- a/libgo/go/runtime/softfloat64.go
+++ /dev/null
@@ -1,498 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Software IEEE754 64-bit floating point.
-// Only referred to (and thus linked in) by arm port
-// and by tests in this directory.
-
-package runtime
-
-const (
- mantbits64 uint = 52
- expbits64 uint = 11
- bias64 = -1<<(expbits64-1) + 1
-
- nan64 uint64 = (1<<expbits64-1)<<mantbits64 + 1
- inf64 uint64 = (1<<expbits64 - 1) << mantbits64
- neg64 uint64 = 1 << (expbits64 + mantbits64)
-
- mantbits32 uint = 23
- expbits32 uint = 8
- bias32 = -1<<(expbits32-1) + 1
-
- nan32 uint32 = (1<<expbits32-1)<<mantbits32 + 1
- inf32 uint32 = (1<<expbits32 - 1) << mantbits32
- neg32 uint32 = 1 << (expbits32 + mantbits32)
-)
-
-func funpack64(f uint64) (sign, mant uint64, exp int, inf, nan bool) {
- sign = f & (1 << (mantbits64 + expbits64))
- mant = f & (1<<mantbits64 - 1)
- exp = int(f>>mantbits64) & (1<<expbits64 - 1)
-
- switch exp {
- case 1<<expbits64 - 1:
- if mant != 0 {
- nan = true
- return
- }
- inf = true
- return
-
- case 0:
- // denormalized
- if mant != 0 {
- exp += bias64 + 1
- for mant < 1<<mantbits64 {
- mant <<= 1
- exp--
- }
- }
-
- default:
- // add implicit top bit
- mant |= 1 << mantbits64
- exp += bias64
- }
- return
-}
-
-func funpack32(f uint32) (sign, mant uint32, exp int, inf, nan bool) {
- sign = f & (1 << (mantbits32 + expbits32))
- mant = f & (1<<mantbits32 - 1)
- exp = int(f>>mantbits32) & (1<<expbits32 - 1)
-
- switch exp {
- case 1<<expbits32 - 1:
- if mant != 0 {
- nan = true
- return
- }
- inf = true
- return
-
- case 0:
- // denormalized
- if mant != 0 {
- exp += bias32 + 1
- for mant < 1<<mantbits32 {
- mant <<= 1
- exp--
- }
- }
-
- default:
- // add implicit top bit
- mant |= 1 << mantbits32
- exp += bias32
- }
- return
-}
-
-func fpack64(sign, mant uint64, exp int, trunc uint64) uint64 {
- mant0, exp0, trunc0 := mant, exp, trunc
- if mant == 0 {
- return sign
- }
- for mant < 1<<mantbits64 {
- mant <<= 1
- exp--
- }
- for mant >= 4<<mantbits64 {
- trunc |= mant & 1
- mant >>= 1
- exp++
- }
- if mant >= 2<<mantbits64 {
- if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
- mant++
- if mant >= 4<<mantbits64 {
- mant >>= 1
- exp++
- }
- }
- mant >>= 1
- exp++
- }
- if exp >= 1<<expbits64-1+bias64 {
- return sign ^ inf64
- }
- if exp < bias64+1 {
- if exp < bias64-int(mantbits64) {
- return sign | 0
- }
- // repeat expecting denormal
- mant, exp, trunc = mant0, exp0, trunc0
- for exp < bias64 {
- trunc |= mant & 1
- mant >>= 1
- exp++
- }
- if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
- mant++
- }
- mant >>= 1
- exp++
- if mant < 1<<mantbits64 {
- return sign | mant
- }
- }
- return sign | uint64(exp-bias64)<<mantbits64 | mant&(1<<mantbits64-1)
-}
-
-func fpack32(sign, mant uint32, exp int, trunc uint32) uint32 {
- mant0, exp0, trunc0 := mant, exp, trunc
- if mant == 0 {
- return sign
- }
- for mant < 1<<mantbits32 {
- mant <<= 1
- exp--
- }
- for mant >= 4<<mantbits32 {
- trunc |= mant & 1
- mant >>= 1
- exp++
- }
- if mant >= 2<<mantbits32 {
- if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
- mant++
- if mant >= 4<<mantbits32 {
- mant >>= 1
- exp++
- }
- }
- mant >>= 1
- exp++
- }
- if exp >= 1<<expbits32-1+bias32 {
- return sign ^ inf32
- }
- if exp < bias32+1 {
- if exp < bias32-int(mantbits32) {
- return sign | 0
- }
- // repeat expecting denormal
- mant, exp, trunc = mant0, exp0, trunc0
- for exp < bias32 {
- trunc |= mant & 1
- mant >>= 1
- exp++
- }
- if mant&1 != 0 && (trunc != 0 || mant&2 != 0) {
- mant++
- }
- mant >>= 1
- exp++
- if mant < 1<<mantbits32 {
- return sign | mant
- }
- }
- return sign | uint32(exp-bias32)<<mantbits32 | mant&(1<<mantbits32-1)
-}
-
-func fadd64(f, g uint64) uint64 {
- fs, fm, fe, fi, fn := funpack64(f)
- gs, gm, ge, gi, gn := funpack64(g)
-
- // Special cases.
- switch {
- case fn || gn: // NaN + x or x + NaN = NaN
- return nan64
-
- case fi && gi && fs != gs: // +Inf + -Inf or -Inf + +Inf = NaN
- return nan64
-
- case fi: // ±Inf + g = ±Inf
- return f
-
- case gi: // f + ±Inf = ±Inf
- return g
-
- case fm == 0 && gm == 0 && fs != 0 && gs != 0: // -0 + -0 = -0
- return f
-
- case fm == 0: // 0 + g = g but 0 + -0 = +0
- if gm == 0 {
- g ^= gs
- }
- return g
-
- case gm == 0: // f + 0 = f
- return f
-
- }
-
- if fe < ge || fe == ge && fm < gm {
- f, g, fs, fm, fe, gs, gm, ge = g, f, gs, gm, ge, fs, fm, fe
- }
-
- shift := uint(fe - ge)
- fm <<= 2
- gm <<= 2
- trunc := gm & (1<<shift - 1)
- gm >>= shift
- if fs == gs {
- fm += gm
- } else {
- fm -= gm
- if trunc != 0 {
- fm--
- }
- }
- if fm == 0 {
- fs = 0
- }
- return fpack64(fs, fm, fe-2, trunc)
-}
-
-func fsub64(f, g uint64) uint64 {
- return fadd64(f, fneg64(g))
-}
-
-func fneg64(f uint64) uint64 {
- return f ^ (1 << (mantbits64 + expbits64))
-}
-
-func fmul64(f, g uint64) uint64 {
- fs, fm, fe, fi, fn := funpack64(f)
- gs, gm, ge, gi, gn := funpack64(g)
-
- // Special cases.
- switch {
- case fn || gn: // NaN * g or f * NaN = NaN
- return nan64
-
- case fi && gi: // Inf * Inf = Inf (with sign adjusted)
- return f ^ gs
-
- case fi && gm == 0, fm == 0 && gi: // 0 * Inf = Inf * 0 = NaN
- return nan64
-
- case fm == 0: // 0 * x = 0 (with sign adjusted)
- return f ^ gs
-
- case gm == 0: // x * 0 = 0 (with sign adjusted)
- return g ^ fs
- }
-
- // 53-bit * 53-bit = 107- or 108-bit
- lo, hi := mullu(fm, gm)
- shift := mantbits64 - 1
- trunc := lo & (1<<shift - 1)
- mant := hi<<(64-shift) | lo>>shift
- return fpack64(fs^gs, mant, fe+ge-1, trunc)
-}
-
-func fdiv64(f, g uint64) uint64 {
- fs, fm, fe, fi, fn := funpack64(f)
- gs, gm, ge, gi, gn := funpack64(g)
-
- // Special cases.
- switch {
- case fn || gn: // NaN / g = f / NaN = NaN
- return nan64
-
- case fi && gi: // ±Inf / ±Inf = NaN
- return nan64
-
- case !fi && !gi && fm == 0 && gm == 0: // 0 / 0 = NaN
- return nan64
-
- case fi, !gi && gm == 0: // Inf / g = f / 0 = Inf
- return fs ^ gs ^ inf64
-
-	case gi, fm == 0: // f / Inf = 0 / g = 0
- return fs ^ gs ^ 0
- }
- _, _, _, _ = fi, fn, gi, gn
-
- // 53-bit<<54 / 53-bit = 53- or 54-bit.
- shift := mantbits64 + 2
- q, r := divlu(fm>>(64-shift), fm<<shift, gm)
- return fpack64(fs^gs, q, fe-ge-2, r)
-}
-
-func f64to32(f uint64) uint32 {
- fs, fm, fe, fi, fn := funpack64(f)
- if fn {
- return nan32
- }
- fs32 := uint32(fs >> 32)
- if fi {
- return fs32 ^ inf32
- }
- const d = mantbits64 - mantbits32 - 1
- return fpack32(fs32, uint32(fm>>d), fe-1, uint32(fm&(1<<d-1)))
-}
-
-func f32to64(f uint32) uint64 {
- const d = mantbits64 - mantbits32
- fs, fm, fe, fi, fn := funpack32(f)
- if fn {
- return nan64
- }
- fs64 := uint64(fs) << 32
- if fi {
- return fs64 ^ inf64
- }
- return fpack64(fs64, uint64(fm)<<d, fe, 0)
-}
-
-func fcmp64(f, g uint64) (cmp int, isnan bool) {
- fs, fm, _, fi, fn := funpack64(f)
- gs, gm, _, gi, gn := funpack64(g)
-
- switch {
- case fn, gn: // flag NaN
- return 0, true
-
- case !fi && !gi && fm == 0 && gm == 0: // ±0 == ±0
- return 0, false
-
- case fs > gs: // f < 0, g > 0
- return -1, false
-
- case fs < gs: // f > 0, g < 0
- return +1, false
-
- // Same sign, not NaN.
- // Can compare encodings directly now.
- // Reverse for sign.
- case fs == 0 && f < g, fs != 0 && f > g:
- return -1, false
-
- case fs == 0 && f > g, fs != 0 && f < g:
- return +1, false
- }
-
- // f == g
- return 0, false
-}
-
-func f64toint(f uint64) (val int64, ok bool) {
- fs, fm, fe, fi, fn := funpack64(f)
-
- switch {
- case fi, fn: // NaN
- return 0, false
-
- case fe < -1: // f < 0.5
- return 0, false
-
- case fe > 63: // f >= 2^63
- if fs != 0 && fm == 0 { // f == -2^63
- return -1 << 63, true
- }
- if fs != 0 {
- return 0, false
- }
- return 0, false
- }
-
- for fe > int(mantbits64) {
- fe--
- fm <<= 1
- }
- for fe < int(mantbits64) {
- fe++
- fm >>= 1
- }
- val = int64(fm)
- if fs != 0 {
- val = -val
- }
- return val, true
-}
-
-func fintto64(val int64) (f uint64) {
- fs := uint64(val) & (1 << 63)
- mant := uint64(val)
- if fs != 0 {
- mant = -mant
- }
- return fpack64(fs, mant, int(mantbits64), 0)
-}
-
-// 64x64 -> 128 multiply.
-// adapted from hacker's delight.
-func mullu(u, v uint64) (lo, hi uint64) {
- const (
- s = 32
- mask = 1<<s - 1
- )
- u0 := u & mask
- u1 := u >> s
- v0 := v & mask
- v1 := v >> s
- w0 := u0 * v0
- t := u1*v0 + w0>>s
- w1 := t & mask
- w2 := t >> s
- w1 += u0 * v1
- return u * v, u1*v1 + w2 + w1>>s
-}
-
-// 128/64 -> 64 quotient, 64 remainder.
-// adapted from hacker's delight
-func divlu(u1, u0, v uint64) (q, r uint64) {
- const b = 1 << 32
-
- if u1 >= v {
- return 1<<64 - 1, 1<<64 - 1
- }
-
- // s = nlz(v); v <<= s
- s := uint(0)
- for v&(1<<63) == 0 {
- s++
- v <<= 1
- }
-
- vn1 := v >> 32
- vn0 := v & (1<<32 - 1)
- un32 := u1<<s | u0>>(64-s)
- un10 := u0 << s
- un1 := un10 >> 32
- un0 := un10 & (1<<32 - 1)
- q1 := un32 / vn1
- rhat := un32 - q1*vn1
-
-again1:
- if q1 >= b || q1*vn0 > b*rhat+un1 {
- q1--
- rhat += vn1
- if rhat < b {
- goto again1
- }
- }
-
- un21 := un32*b + un1 - q1*v
- q0 := un21 / vn1
- rhat = un21 - q0*vn1
-
-again2:
- if q0 >= b || q0*vn0 > b*rhat+un0 {
- q0--
- rhat += vn1
- if rhat < b {
- goto again2
- }
- }
-
- return q1*b + q0, (un21*b + un0 - q0*v) >> s
-}
-
-// callable from C
-
-func fadd64c(f, g uint64, ret *uint64) { *ret = fadd64(f, g) }
-func fsub64c(f, g uint64, ret *uint64) { *ret = fsub64(f, g) }
-func fmul64c(f, g uint64, ret *uint64) { *ret = fmul64(f, g) }
-func fdiv64c(f, g uint64, ret *uint64) { *ret = fdiv64(f, g) }
-func fneg64c(f uint64, ret *uint64) { *ret = fneg64(f) }
-func f32to64c(f uint32, ret *uint64) { *ret = f32to64(f) }
-func f64to32c(f uint64, ret *uint32) { *ret = f64to32(f) }
-func fcmp64c(f, g uint64, ret *int, retnan *bool) { *ret, *retnan = fcmp64(f, g) }
-func fintto64c(val int64, ret *uint64) { *ret = fintto64(val) }
-func f64tointc(f uint64, ret *int64, retok *bool) { *ret, *retok = f64toint(f) }
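
The 64x64->128 multiply deleted above (mullu) is what fmul64 uses to form the full-width mantissa product before rounding. As a standalone sketch, not part of this commit, the same 32-bit-limb decomposition can be checked against math/bits.Mul64 from the current standard library (an assumption of the sketch; no such package existed when this runtime code was written):

package main

import (
	"fmt"
	"math/bits"
)

// mullu mirrors the Hacker's Delight style decomposition used by the
// softfloat mullu above: split u and v into 32-bit halves, form the
// partial products, and carry the middle terms into the high word.
func mullu(u, v uint64) (lo, hi uint64) {
	const (
		s    = 32
		mask = 1<<s - 1
	)
	u0, u1 := u&mask, u>>s
	v0, v1 := v&mask, v>>s
	w0 := u0 * v0
	t := u1*v0 + w0>>s
	w1 := t&mask + u0*v1
	w2 := t >> s
	return u * v, u1*v1 + w2 + w1>>s
}

func main() {
	u, v := uint64(0xdeadbeefcafebabe), uint64(0x0123456789abcdef)
	lo, hi := mullu(u, v)
	refHi, refLo := bits.Mul64(u, v)      // reference 128-bit product
	fmt.Println(lo == refLo, hi == refHi) // true true
}
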
diff --git a/libgo/go/runtime/softfloat64_test.go b/libgo/go/runtime/softfloat64_test.go
deleted file mode 100644
index df63010..0000000
--- a/libgo/go/runtime/softfloat64_test.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime_test
-
-import (
- "math"
- "math/rand"
- . "runtime"
- "testing"
-)
-
-// turn uint64 op into float64 op
-func fop(f func(x, y uint64) uint64) func(x, y float64) float64 {
- return func(x, y float64) float64 {
- bx := math.Float64bits(x)
- by := math.Float64bits(y)
- return math.Float64frombits(f(bx, by))
- }
-}
-
-func add(x, y float64) float64 { return x + y }
-func sub(x, y float64) float64 { return x - y }
-func mul(x, y float64) float64 { return x * y }
-func div(x, y float64) float64 { return x / y }
-
-func TestFloat64(t *testing.T) {
- base := []float64{
- 0,
- math.Copysign(0, -1),
- -1,
- 1,
- math.NaN(),
- math.Inf(+1),
- math.Inf(-1),
- 0.1,
- 1.5,
- 1.9999999999999998, // all 1s mantissa
- 1.3333333333333333, // 1.010101010101...
- 1.1428571428571428, // 1.001001001001...
- 1.112536929253601e-308, // first normal
- 2,
- 4,
- 8,
- 16,
- 32,
- 64,
- 128,
- 256,
- 3,
- 12,
- 1234,
- 123456,
- -0.1,
- -1.5,
- -1.9999999999999998,
- -1.3333333333333333,
- -1.1428571428571428,
- -2,
- -3,
- 1e-200,
- 1e-300,
- 1e-310,
- 5e-324,
- 1e-105,
- 1e-305,
- 1e+200,
- 1e+306,
- 1e+307,
- 1e+308,
- }
- all := make([]float64, 200)
- copy(all, base)
- for i := len(base); i < len(all); i++ {
- all[i] = rand.NormFloat64()
- }
-
- test(t, "+", add, fop(Fadd64), all)
- test(t, "-", sub, fop(Fsub64), all)
- if GOARCH != "386" { // 386 is not precise!
- test(t, "*", mul, fop(Fmul64), all)
- test(t, "/", div, fop(Fdiv64), all)
- }
-}
-
-// 64 -hw-> 32 -hw-> 64
-func trunc32(f float64) float64 {
- return float64(float32(f))
-}
-
-// 64 -sw->32 -hw-> 64
-func to32sw(f float64) float64 {
- return float64(math.Float32frombits(F64to32(math.Float64bits(f))))
-}
-
-// 64 -hw->32 -sw-> 64
-func to64sw(f float64) float64 {
- return math.Float64frombits(F32to64(math.Float32bits(float32(f))))
-}
-
-// float64 -hw-> int64 -hw-> float64
-func hwint64(f float64) float64 {
- return float64(int64(f))
-}
-
-// float64 -hw-> int32 -hw-> float64
-func hwint32(f float64) float64 {
- return float64(int32(f))
-}
-
-// float64 -sw-> int64 -hw-> float64
-func toint64sw(f float64) float64 {
- i, ok := F64toint(math.Float64bits(f))
- if !ok {
- // There's no right answer for out of range.
- // Match the hardware to pass the test.
- i = int64(f)
- }
- return float64(i)
-}
-
-// float64 -hw-> int64 -sw-> float64
-func fromint64sw(f float64) float64 {
- return math.Float64frombits(Fintto64(int64(f)))
-}
-
-var nerr int
-
-func err(t *testing.T, format string, args ...interface{}) {
- t.Errorf(format, args...)
-
- // cut errors off after a while.
- // otherwise we spend all our time
- // allocating memory to hold the
- // formatted output.
- if nerr++; nerr >= 10 {
- t.Fatal("too many errors")
- }
-}
-
-func test(t *testing.T, op string, hw, sw func(float64, float64) float64, all []float64) {
- for _, f := range all {
- for _, g := range all {
- h := hw(f, g)
- s := sw(f, g)
- if !same(h, s) {
- err(t, "%g %s %g = sw %g, hw %g\n", f, op, g, s, h)
- }
- testu(t, "to32", trunc32, to32sw, h)
- testu(t, "to64", trunc32, to64sw, h)
- testu(t, "toint64", hwint64, toint64sw, h)
- testu(t, "fromint64", hwint64, fromint64sw, h)
- testcmp(t, f, h)
- testcmp(t, h, f)
- testcmp(t, g, h)
- testcmp(t, h, g)
- }
- }
-}
-
-func testu(t *testing.T, op string, hw, sw func(float64) float64, v float64) {
- h := hw(v)
- s := sw(v)
- if !same(h, s) {
- err(t, "%s %g = sw %g, hw %g\n", op, v, s, h)
- }
-}
-
-func hwcmp(f, g float64) (cmp int, isnan bool) {
- switch {
- case f < g:
- return -1, false
- case f > g:
- return +1, false
- case f == g:
- return 0, false
- }
- return 0, true // must be NaN
-}
-
-func testcmp(t *testing.T, f, g float64) {
- hcmp, hisnan := hwcmp(f, g)
- scmp, sisnan := Fcmp64(math.Float64bits(f), math.Float64bits(g))
- if hcmp != scmp || hisnan != sisnan {
- err(t, "cmp(%g, %g) = sw %v, %v, hw %v, %v\n", f, g, scmp, sisnan, hcmp, hisnan)
- }
-}
-
-func same(f, g float64) bool {
- if math.IsNaN(f) && math.IsNaN(g) {
- return true
- }
- if math.Copysign(1, f) != math.Copysign(1, g) {
- return false
- }
- return f == g
-}
diff --git a/libgo/go/runtime/stack.go b/libgo/go/runtime/stack.go
deleted file mode 100644
index f1b7d32..0000000
--- a/libgo/go/runtime/stack.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- // Goroutine preemption request.
- // Stored into g->stackguard0 to cause split stack check failure.
- // Must be greater than any real sp.
- // 0xfffffade in hex.
- stackPreempt = ^uintptr(1313)
-)
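
For reference, the sentinel deleted above is ^uintptr(1313), which is the 0xfffffade mentioned in its comment on a 32-bit target and the sign-extended 0xfffffffffffffade on a 64-bit one. A minimal check:

package main

import "fmt"

func main() {
	// stackPreempt above is ^uintptr(1313); print the 32- and 64-bit forms.
	fmt.Printf("%#x\n", ^uint32(1313)) // 0xfffffade
	fmt.Printf("%#x\n", ^uint64(1313)) // 0xfffffffffffffade
}
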
diff --git a/libgo/go/runtime/string.go b/libgo/go/runtime/string.go
deleted file mode 100644
index 0809f89..0000000
--- a/libgo/go/runtime/string.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-func concatstrings(a []string) string {
- idx := 0
- l := 0
- count := 0
- for i, x := range a {
- n := len(x)
- if n == 0 {
- continue
- }
- if l+n < l {
- gothrow("string concatenation too long")
- }
- l += n
- count++
- idx = i
- }
- if count == 0 {
- return ""
- }
- if count == 1 {
- return a[idx]
- }
- s, b := rawstring(l)
- l = 0
- for _, x := range a {
- copy(b[l:], x)
- l += len(x)
- }
- return s
-}
-
-//go:nosplit
-func concatstring2(a [2]string) string {
- return concatstrings(a[:])
-}
-
-//go:nosplit
-func concatstring3(a [3]string) string {
- return concatstrings(a[:])
-}
-
-//go:nosplit
-func concatstring4(a [4]string) string {
- return concatstrings(a[:])
-}
-
-//go:nosplit
-func concatstring5(a [5]string) string {
- return concatstrings(a[:])
-}
-
-func slicebytetostring(b []byte) string {
- if raceenabled && len(b) > 0 {
- racereadrangepc(unsafe.Pointer(&b[0]),
- uintptr(len(b)),
- getcallerpc(unsafe.Pointer(&b)),
- funcPC(slicebytetostring))
- }
- s, c := rawstring(len(b))
- copy(c, b)
- return s
-}
-
-func slicebytetostringtmp(b []byte) string {
- // Return a "string" referring to the actual []byte bytes.
- // This is only for use by internal compiler optimizations
- // that know that the string form will be discarded before
- // the calling goroutine could possibly modify the original
- // slice or synchronize with another goroutine.
- // Today, the only such case is a m[string(k)] lookup where
- // m is a string-keyed map and k is a []byte.
-
- if raceenabled && len(b) > 0 {
- racereadrangepc(unsafe.Pointer(&b[0]),
- uintptr(len(b)),
- getcallerpc(unsafe.Pointer(&b)),
- funcPC(slicebytetostringtmp))
- }
- return *(*string)(unsafe.Pointer(&b))
-}
-
-func stringtoslicebyte(s string) []byte {
- b := rawbyteslice(len(s))
- copy(b, s)
- return b
-}
-
-func stringtoslicerune(s string) []rune {
- // two passes.
- // unlike slicerunetostring, no race because strings are immutable.
- n := 0
- t := s
- for len(s) > 0 {
- _, k := charntorune(s)
- s = s[k:]
- n++
- }
- a := rawruneslice(n)
- n = 0
- for len(t) > 0 {
- r, k := charntorune(t)
- t = t[k:]
- a[n] = r
- n++
- }
- return a
-}
-
-func slicerunetostring(a []rune) string {
- if raceenabled && len(a) > 0 {
- racereadrangepc(unsafe.Pointer(&a[0]),
- uintptr(len(a))*unsafe.Sizeof(a[0]),
- getcallerpc(unsafe.Pointer(&a)),
- funcPC(slicerunetostring))
- }
- var dum [4]byte
- size1 := 0
- for _, r := range a {
- size1 += runetochar(dum[:], r)
- }
- s, b := rawstring(size1 + 3)
- size2 := 0
- for _, r := range a {
- // check for race
- if size2 >= size1 {
- break
- }
- size2 += runetochar(b[size2:], r)
- }
- return s[:size2]
-}
-
-type stringStruct struct {
- str unsafe.Pointer
- len int
-}
-
-func intstring(v int64) string {
- s, b := rawstring(4)
- n := runetochar(b, rune(v))
- return s[:n]
-}
-
-// stringiter returns the index of the next
-// rune after the rune that starts at s[k].
-func stringiter(s string, k int) int {
- if k >= len(s) {
- // 0 is end of iteration
- return 0
- }
-
- c := s[k]
- if c < runeself {
- return k + 1
- }
-
- // multi-char rune
- _, n := charntorune(s[k:])
- return k + n
-}
-
-// stringiter2 returns the rune that starts at s[k]
-// and the index where the next rune starts.
-func stringiter2(s string, k int) (int, rune) {
- if k >= len(s) {
- // 0 is end of iteration
- return 0, 0
- }
-
- c := s[k]
- if c < runeself {
- return k + 1, rune(c)
- }
-
- // multi-char rune
- r, n := charntorune(s[k:])
- return k + n, r
-}
-
-// rawstring allocates storage for a new string. The returned
-// string and byte slice both refer to the same storage.
-// The storage is not zeroed. Callers should use
-// b to set the string contents and then drop b.
-func rawstring(size int) (s string, b []byte) {
- p := mallocgc(uintptr(size), nil, flagNoScan|flagNoZero)
-
- (*stringStruct)(unsafe.Pointer(&s)).str = p
- (*stringStruct)(unsafe.Pointer(&s)).len = size
-
- (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p)
- (*slice)(unsafe.Pointer(&b)).len = uint(size)
- (*slice)(unsafe.Pointer(&b)).cap = uint(size)
-
- for {
- ms := maxstring
- if uintptr(size) <= uintptr(ms) || casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) {
- return
- }
- }
-}
-
-// rawbyteslice allocates a new byte slice. The byte slice is not zeroed.
-func rawbyteslice(size int) (b []byte) {
- cap := goroundupsize(uintptr(size))
- p := mallocgc(cap, nil, flagNoScan|flagNoZero)
- if cap != uintptr(size) {
- memclr(add(p, uintptr(size)), cap-uintptr(size))
- }
-
- (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p)
- (*slice)(unsafe.Pointer(&b)).len = uint(size)
- (*slice)(unsafe.Pointer(&b)).cap = uint(cap)
- return
-}
-
-// rawruneslice allocates a new rune slice. The rune slice is not zeroed.
-func rawruneslice(size int) (b []rune) {
- if uintptr(size) > maxmem/4 {
- gothrow("out of memory")
- }
- mem := goroundupsize(uintptr(size) * 4)
- p := mallocgc(mem, nil, flagNoScan|flagNoZero)
- if mem != uintptr(size)*4 {
- memclr(add(p, uintptr(size)*4), mem-uintptr(size)*4)
- }
-
- (*slice)(unsafe.Pointer(&b)).array = (*uint8)(p)
- (*slice)(unsafe.Pointer(&b)).len = uint(size)
- (*slice)(unsafe.Pointer(&b)).cap = uint(mem / 4)
- return
-}
-
-// used by cmd/cgo
-func gobytes(p *byte, n int) []byte {
- if n == 0 {
- return make([]byte, 0)
- }
- x := make([]byte, n)
- memmove(unsafe.Pointer(&x[0]), unsafe.Pointer(p), uintptr(n))
- return x
-}
-
-func gostringsize(n int) string {
- s, _ := rawstring(n)
- return s
-}
-
-//go:noescape
-func findnull(*byte) int
-
-func gostring(p *byte) string {
- l := findnull(p)
- if l == 0 {
- return ""
- }
- s, b := rawstring(l)
- memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l))
- return s
-}
-
-func gostringn(p *byte, l int) string {
- if l == 0 {
- return ""
- }
- s, b := rawstring(l)
- memmove(unsafe.Pointer(&b[0]), unsafe.Pointer(p), uintptr(l))
- return s
-}
-
-func index(s, t string) int {
- if len(t) == 0 {
- return 0
- }
- for i := 0; i < len(s); i++ {
- if s[i] == t[0] && hasprefix(s[i:], t) {
- return i
- }
- }
- return -1
-}
-
-func contains(s, t string) bool {
- return index(s, t) >= 0
-}
-
-func hasprefix(s, t string) bool {
- return len(s) >= len(t) && s[:len(t)] == t
-}
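
slicebytetostringtmp in the deleted file above exists so that a lookup of the form m[string(k)], with m a string-keyed map and k a []byte, can reuse the byte slice's storage instead of allocating a copy. A small standalone sketch of the observable effect; the zero-allocation outcome assumes a compiler that applies this optimization and is not guaranteed by the snippet itself:

package main

import (
	"fmt"
	"testing"
)

func main() {
	m := map[string]int{"key": 1}
	k := []byte("key")
	// m[string(k)] is the pattern slicebytetostringtmp is written for:
	// the temporary string cannot outlive the lookup, so no copy is needed.
	allocs := testing.AllocsPerRun(1000, func() {
		_ = m[string(k)]
	})
	fmt.Println(allocs) // 0 when the compiler applies the optimization
}
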
diff --git a/libgo/go/runtime/string_test.go b/libgo/go/runtime/string_test.go
index df3ff06..71bd830 100644
--- a/libgo/go/runtime/string_test.go
+++ b/libgo/go/runtime/string_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ "strings"
"testing"
)
@@ -75,3 +76,165 @@ func BenchmarkCompareStringBig(b *testing.B) {
}
b.SetBytes(int64(len(s1)))
}
+
+func BenchmarkRuneIterate(b *testing.B) {
+ bytes := make([]byte, 100)
+ for i := range bytes {
+ bytes[i] = byte('A')
+ }
+ s := string(bytes)
+ for i := 0; i < b.N; i++ {
+ for range s {
+ }
+ }
+}
+
+func BenchmarkRuneIterate2(b *testing.B) {
+ bytes := make([]byte, 100)
+ for i := range bytes {
+ bytes[i] = byte('A')
+ }
+ s := string(bytes)
+ for i := 0; i < b.N; i++ {
+ for range s {
+ }
+ }
+}
+
+/*
+func TestStringW(t *testing.T) {
+ strings := []string{
+ "hello",
+ "a\u5566\u7788b",
+ }
+
+ for _, s := range strings {
+ var b []uint16
+ for _, c := range s {
+ b = append(b, uint16(c))
+ if c != rune(uint16(c)) {
+ t.Errorf("bad test: stringW can't handle >16 bit runes")
+ }
+ }
+ b = append(b, 0)
+ r := runtime.GostringW(b)
+ if r != s {
+ t.Errorf("gostringW(%v) = %s, want %s", b, r, s)
+ }
+ }
+}
+*/
+
+func TestLargeStringConcat(t *testing.T) {
+ output := executeTest(t, largeStringConcatSource, nil)
+ want := "panic: " + strings.Repeat("0", 1<<10) + strings.Repeat("1", 1<<10) +
+ strings.Repeat("2", 1<<10) + strings.Repeat("3", 1<<10)
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
+ }
+}
+
+var largeStringConcatSource = `
+package main
+import "strings"
+func main() {
+ s0 := strings.Repeat("0", 1<<10)
+ s1 := strings.Repeat("1", 1<<10)
+ s2 := strings.Repeat("2", 1<<10)
+ s3 := strings.Repeat("3", 1<<10)
+ s := s0 + s1 + s2 + s3
+ panic(s)
+}
+`
+
+/*
+func TestGostringnocopy(t *testing.T) {
+ max := *runtime.Maxstring
+ b := make([]byte, max+10)
+ for i := uintptr(0); i < max+9; i++ {
+ b[i] = 'a'
+ }
+ _ = runtime.Gostringnocopy(&b[0])
+ newmax := *runtime.Maxstring
+ if newmax != max+9 {
+ t.Errorf("want %d, got %d", max+9, newmax)
+ }
+}
+*/
+
+func TestCompareTempString(t *testing.T) {
+ s := "foo"
+ b := []byte(s)
+ n := testing.AllocsPerRun(1000, func() {
+ if string(b) != s {
+ t.Fatalf("strings are not equal: '%v' and '%v'", string(b), s)
+ }
+ if string(b) == s {
+ } else {
+ t.Fatalf("strings are not equal: '%v' and '%v'", string(b), s)
+ }
+ })
+ if n != 0 {
+ t.Fatalf("want 0 allocs, got %v", n)
+ }
+}
+
+func TestStringOnStack(t *testing.T) {
+ s := ""
+ for i := 0; i < 3; i++ {
+ s = "a" + s + "b" + s + "c"
+ }
+
+ if want := "aaabcbabccbaabcbabccc"; s != want {
+ t.Fatalf("want: '%v', got '%v'", want, s)
+ }
+}
+
+func TestIntString(t *testing.T) {
+ // Non-escaping result of intstring.
+ s := ""
+ for i := 0; i < 4; i++ {
+ s += string(i+'0') + string(i+'0'+1)
+ }
+ if want := "01122334"; s != want {
+ t.Fatalf("want '%v', got '%v'", want, s)
+ }
+
+ // Escaping result of intstring.
+ var a [4]string
+ for i := 0; i < 4; i++ {
+ a[i] = string(i + '0')
+ }
+ s = a[0] + a[1] + a[2] + a[3]
+ if want := "0123"; s != want {
+ t.Fatalf("want '%v', got '%v'", want, s)
+ }
+}
+
+func TestIntStringAllocs(t *testing.T) {
+ unknown := '0'
+ n := testing.AllocsPerRun(1000, func() {
+ s1 := string(unknown)
+ s2 := string(unknown + 1)
+ if s1 == s2 {
+ t.Fatalf("bad")
+ }
+ })
+ if n != 0 {
+ t.Fatalf("want 0 allocs, got %v", n)
+ }
+}
+
+func TestRangeStringCast(t *testing.T) {
+ s := "abc"
+ n := testing.AllocsPerRun(1000, func() {
+ for i, c := range []byte(s) {
+ if c != s[i] {
+ t.Fatalf("want '%c' at pos %v, got '%c'", s[i], i, c)
+ }
+ }
+ })
+ if n != 0 {
+ t.Fatalf("want 0 allocs, got %v", n)
+ }
+}
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
deleted file mode 100644
index fe8f9c9..0000000
--- a/libgo/go/runtime/stubs.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import "unsafe"
-
-// Declarations for runtime services implemented in C or assembly.
-
-const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
-const regSize = 4 << (^uintreg(0) >> 63) // unsafe.Sizeof(uintreg(0)) but an ideal const
-
-// Should be a built-in for unsafe.Pointer?
-//go:nosplit
-func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
- return unsafe.Pointer(uintptr(p) + x)
-}
-
-// n must be a power of 2
-func roundup(p unsafe.Pointer, n uintptr) unsafe.Pointer {
- delta := -uintptr(p) & (n - 1)
- return unsafe.Pointer(uintptr(p) + delta)
-}
-
-// in runtime.c
-func getg() *g
-func acquirem() *m
-func releasem(mp *m)
-func gomcache() *mcache
-func readgstatus(*g) uint32 // proc.c
-
-// mcall switches from the g to the g0 stack and invokes fn(g),
-// where g is the goroutine that made the call.
-// mcall saves g's current PC/SP in g->sched so that it can be restored later.
-// It is up to fn to arrange for that later execution, typically by recording
-// g in a data structure, causing something to call ready(g) later.
-// mcall returns to the original goroutine g later, when g has been rescheduled.
-// fn must not return at all; typically it ends by calling schedule, to let the m
-// run other goroutines.
-//
-// mcall can only be called from g stacks (not g0, not gsignal).
-//go:noescape
-func mcall(fn func(*g))
-
-// onM switches from the g to the g0 stack and invokes fn().
-// When fn returns, onM switches back to the g and returns,
-// continuing execution on the g stack.
-// If arguments must be passed to fn, they can be written to
-// g->m->ptrarg (pointers) and g->m->scalararg (non-pointers)
-// before the call and then consulted during fn.
-// Similarly, fn can pass return values back in those locations.
-// If fn is written in Go, it can be a closure, which avoids the need for
-// ptrarg and scalararg entirely.
-// After reading values out of ptrarg and scalararg it is conventional
-// to zero them to avoid (memory or information) leaks.
-//
-// If onM is called from a g0 stack, it invokes fn and returns,
-// without any stack switches.
-//
-// If onM is called from a gsignal stack, it crashes the program.
-// The implication is that functions used in signal handlers must
-// not use onM.
-//
-// NOTE(rsc): We could introduce a separate onMsignal that is
-// like onM but if called from a gsignal stack would just run fn on
-// that stack. The caller of onMsignal would be required to save the
-// old values of ptrarg/scalararg and restore them when the call
-// was finished, in case the signal interrupted an onM sequence
-// in progress on the g or g0 stacks. Until there is a clear need for this,
-// we just reject onM in signal handling contexts entirely.
-//
-//go:noescape
-func onM(fn func())
-
-// onMsignal is like onM but is allowed to be used in code that
-// might run on the gsignal stack. Code running on a signal stack
-// may be interrupting an onM sequence on the main stack, so
-// if the onMsignal calling sequence writes to ptrarg/scalararg,
-// it must first save the old values and then restore them when
-// finished. As an exception to the rule, it is fine not to save and
-// restore the values if the program is trying to crash rather than
-// return from the signal handler.
-// Once all the runtime is written in Go, there will be no ptrarg/scalararg
-// and the distinction between onM and onMsignal (and perhaps mcall)
-// can go away.
-//
-// If onMsignal is called from a gsignal stack, it invokes fn directly,
-// without a stack switch. Otherwise onMsignal behaves like onM.
-//
-//go:noescape
-func onM_signalok(fn func())
-
-func badonm() {
- gothrow("onM called from signal goroutine")
-}
-
-// C functions that run on the M stack.
-// Call using mcall.
-func gosched_m(*g)
-func park_m(*g)
-func recovery_m(*g)
-
-// More C functions that run on the M stack.
-// Call using onM.
-func mcacheRefill_m()
-func largeAlloc_m()
-func gc_m()
-func scavenge_m()
-func setFinalizer_m()
-func removeFinalizer_m()
-func markallocated_m()
-func unrollgcprog_m()
-func unrollgcproginplace_m()
-func setgcpercent_m()
-func setmaxthreads_m()
-func ready_m()
-func deferproc_m()
-func goexit_m()
-func startpanic_m()
-func dopanic_m()
-func readmemstats_m()
-func writeheapdump_m()
-
-// memclr clears n bytes starting at ptr.
-// in memclr_*.s
-//go:noescape
-func memclr(ptr unsafe.Pointer, n uintptr)
-
-// memmove copies n bytes from "from" to "to".
-// in memmove_*.s
-//go:noescape
-func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
-
-func starttheworld()
-func stoptheworld()
-func newextram()
-func lockOSThread()
-func unlockOSThread()
-
-// exported value for testing
-var hashLoad = loadFactor
-
-// in asm_*.s
-func fastrand1() uint32
-
-// in asm_*.s
-//go:noescape
-func memeq(a, b unsafe.Pointer, size uintptr) bool
-
-// noescape hides a pointer from escape analysis. noescape is
-// the identity function but escape analysis doesn't think the
-// output depends on the input. noescape is inlined and currently
-// compiles down to a single xor instruction.
-// USE CAREFULLY!
-//go:nosplit
-func noescape(p unsafe.Pointer) unsafe.Pointer {
- x := uintptr(p)
- return unsafe.Pointer(x ^ 0)
-}
-
-func entersyscall()
-func reentersyscall(pc uintptr, sp unsafe.Pointer)
-func entersyscallblock()
-func exitsyscall()
-
-func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
-func gogo(buf *gobuf)
-func gosave(buf *gobuf)
-func read(fd int32, p unsafe.Pointer, n int32) int32
-func close(fd int32) int32
-func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32
-
-//go:noescape
-func jmpdefer(fv *funcval, argp uintptr)
-func exit1(code int32)
-func asminit()
-func setg(gg *g)
-func exit(code int32)
-func breakpoint()
-func nanotime() int64
-func usleep(usec uint32)
-
-// careful: cputicks is not guaranteed to be monotonic! In particular, we have
-// noticed drift between cpus on certain os/arch combinations. See issue 8976.
-func cputicks() int64
-
-func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
-func munmap(addr unsafe.Pointer, n uintptr)
-func madvise(addr unsafe.Pointer, n uintptr, flags int32)
-func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
-func osyield()
-func procyield(cycles uint32)
-func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
-func readgogc() int32
-func purgecachedstats(c *mcache)
-func gostringnocopy(b *byte) string
-func goexit()
-
-//go:noescape
-func write(fd uintptr, p unsafe.Pointer, n int32) int32
-
-//go:noescape
-func cas(ptr *uint32, old, new uint32) bool
-
-//go:noescape
-func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
-
-//go:noescape
-func casuintptr(ptr *uintptr, old, new uintptr) bool
-
-//go:noescape
-func atomicstoreuintptr(ptr *uintptr, new uintptr)
-
-//go:noescape
-func atomicloaduintptr(ptr *uintptr) uintptr
-
-//go:noescape
-func atomicloaduint(ptr *uint) uint
-
-//go:noescape
-func setcallerpc(argp unsafe.Pointer, pc uintptr)
-
-// getcallerpc returns the program counter (PC) of its caller's caller.
-// getcallersp returns the stack pointer (SP) of its caller's caller.
-// For both, the argp must be a pointer to the caller's first function argument.
-// The implementation may or may not use argp, depending on
-// the architecture.
-//
-// For example:
-//
-// func f(arg1, arg2, arg3 int) {
-// pc := getcallerpc(unsafe.Pointer(&arg1))
-//	sp := getcallersp(unsafe.Pointer(&arg1))
-// }
-//
-// These two lines find the PC and SP immediately following
-// the call to f (where f will return).
-//
-// The call to getcallerpc and getcallersp must be done in the
-// frame being asked about. It would not be correct for f to pass &arg1
-// to another function g and let g call getcallerpc/getcallersp.
-// The call inside g might return information about g's caller or
-// information about f's caller or complete garbage.
-//
-// The result of getcallersp is correct at the time of the return,
-// but it may be invalidated by any subsequent call to a function
-// that might relocate the stack in order to grow or shrink it.
-// A general rule is that the result of getcallersp should be used
-// immediately and can only be passed to nosplit functions.
-
-//go:noescape
-func getcallerpc(argp unsafe.Pointer) uintptr
-
-//go:noescape
-func getcallersp(argp unsafe.Pointer) uintptr
-
-//go:noescape
-func asmcgocall(fn, arg unsafe.Pointer)
-
-//go:noescape
-func asmcgocall_errno(fn, arg unsafe.Pointer) int32
-
-//go:noescape
-func open(name *byte, mode, perm int32) int32
-
-//go:noescape
-func gotraceback(*bool) int32
-
-const _NoArgs = ^uintptr(0)
-
-func newstack()
-func newproc()
-func morestack()
-func mstart()
-func rt0_go()
-
-// return0 is a stub used to return 0 from deferproc.
-// It is called at the very end of deferproc to signal
-// the calling Go function that it should not jump
-// to deferreturn.
-// in asm_*.s
-func return0()
-
-// thunk to call time.now.
-func timenow() (sec int64, nsec int32)
-
-// in asm_*.s
-// not called directly; definitions here supply type information for traceback.
-func call16(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call32(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call64(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call128(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call256(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call512(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call1024(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call2048(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call4096(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call8192(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call16384(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call32768(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call65536(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call131072(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call262144(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call524288(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call1048576(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call2097152(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call4194304(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call8388608(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call16777216(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call33554432(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call67108864(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call134217728(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32)
-func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32)
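
Among the helpers deleted above, roundup rounds a pointer up to a power-of-two boundary via -uintptr(p) & (n-1), the distance to the next multiple of n. A standalone sketch of just that arithmetic (roundUp is an illustrative name, not a runtime symbol):

package main

import "fmt"

// roundUp mirrors the alignment trick in roundup above: for a power-of-two n,
// -x & (n-1) is how far x is below the next multiple of n (unsigned wraparound).
func roundUp(x, n uintptr) uintptr {
	return x + (-x & (n - 1))
}

func main() {
	for _, x := range []uintptr{0, 1, 7, 8, 9, 13} {
		fmt.Printf("roundUp(%2d, 8) = %d\n", x, roundUp(x, 8))
	}
	// Prints 0, 8, 8, 8, 16, 16.
}
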
diff --git a/libgo/go/runtime/symtab_test.go b/libgo/go/runtime/symtab_test.go
index 0db63c3..8c8281f 100644
--- a/libgo/go/runtime/symtab_test.go
+++ b/libgo/go/runtime/symtab_test.go
@@ -12,9 +12,8 @@ import (
var _ = runtime.Caller
var _ = strings.HasSuffix
-type _ testing.T
-/* runtime.Caller is not fully implemented for gccgo.
+type _ testing.T
func TestCaller(t *testing.T) {
procs := runtime.GOMAXPROCS(-1)
@@ -42,8 +41,9 @@ func testCallerBar(t *testing.T) {
f := runtime.FuncForPC(pc)
if !ok ||
!strings.HasSuffix(file, "symtab_test.go") ||
- (i == 0 && !strings.HasSuffix(f.Name(), "testCallerBar")) ||
- (i == 1 && !strings.HasSuffix(f.Name(), "testCallerFoo")) ||
+		// FuncForPC doesn't work in gccgo, because of inlining.
+ // (i == 0 && !strings.HasSuffix(f.Name(), "testCallerBar")) ||
+ // (i == 1 && !strings.HasSuffix(f.Name(), "testCallerFoo")) ||
line < 5 || line > 1000 ||
f.Entry() >= pc {
t.Errorf("incorrect symbol info %d: %t %d %d %s %s %d",
@@ -52,4 +52,107 @@ func testCallerBar(t *testing.T) {
}
}
-*/
+func lineNumber() int {
+ _, _, line, _ := runtime.Caller(1)
+ return line // return 0 for error
+}
+
+// Do not add/remove lines in this block without updating the line numbers.
+var firstLine = lineNumber() // 0
+var ( // 1
+ lineVar1 = lineNumber() // 2
+ lineVar2a, lineVar2b = lineNumber(), lineNumber() // 3
+) // 4
+var compLit = []struct { // 5
+ lineA, lineB int // 6
+}{ // 7
+ { // 8
+ lineNumber(), lineNumber(), // 9
+ }, // 10
+ { // 11
+ lineNumber(), // 12
+ lineNumber(), // 13
+ }, // 14
+ { // 15
+ lineB: lineNumber(), // 16
+ lineA: lineNumber(), // 17
+ }, // 18
+} // 19
+var arrayLit = [...]int{lineNumber(), // 20
+ lineNumber(), lineNumber(), // 21
+ lineNumber(), // 22
+} // 23
+var sliceLit = []int{lineNumber(), // 24
+ lineNumber(), lineNumber(), // 25
+ lineNumber(), // 26
+} // 27
+var mapLit = map[int]int{ // 28
+ 29: lineNumber(), // 29
+ 30: lineNumber(), // 30
+ lineNumber(): 31, // 31
+ lineNumber(): 32, // 32
+} // 33
+var intLit = lineNumber() + // 34
+ lineNumber() + // 35
+ lineNumber() // 36
+func trythis() { // 37
+ recordLines(lineNumber(), // 38
+ lineNumber(), // 39
+ lineNumber()) // 40
+}
+
+// Modifications below this line are okay.
+
+var l38, l39, l40 int
+
+func recordLines(a, b, c int) {
+ l38 = a
+ l39 = b
+ l40 = c
+}
+
+func TestLineNumber(t *testing.T) {
+ trythis()
+ for _, test := range []struct {
+ name string
+ val int
+ want int
+ }{
+ {"firstLine", firstLine, 0},
+ {"lineVar1", lineVar1, 2},
+ {"lineVar2a", lineVar2a, 3},
+ {"lineVar2b", lineVar2b, 3},
+ {"compLit[0].lineA", compLit[0].lineA, 9},
+ {"compLit[0].lineB", compLit[0].lineB, 9},
+ {"compLit[1].lineA", compLit[1].lineA, 12},
+ {"compLit[1].lineB", compLit[1].lineB, 13},
+ {"compLit[2].lineA", compLit[2].lineA, 17},
+ {"compLit[2].lineB", compLit[2].lineB, 16},
+
+ {"arrayLit[0]", arrayLit[0], 20},
+ {"arrayLit[1]", arrayLit[1], 21},
+ {"arrayLit[2]", arrayLit[2], 21},
+ {"arrayLit[3]", arrayLit[3], 22},
+
+ {"sliceLit[0]", sliceLit[0], 24},
+ {"sliceLit[1]", sliceLit[1], 25},
+ {"sliceLit[2]", sliceLit[2], 25},
+ {"sliceLit[3]", sliceLit[3], 26},
+
+ {"mapLit[29]", mapLit[29], 29},
+ {"mapLit[30]", mapLit[30], 30},
+ {"mapLit[31]", mapLit[31+firstLine] + firstLine, 31}, // nb it's the key not the value
+ {"mapLit[32]", mapLit[32+firstLine] + firstLine, 32}, // nb it's the key not the value
+
+ {"intLit", intLit - 2*firstLine, 34 + 35 + 36},
+
+ {"l38", l38, 38},
+ {"l39", l39, 39},
+ {"l40", l40, 40},
+ } {
+ if got := test.val - firstLine; got != test.want {
+ t.Errorf("%s on firstLine+%d want firstLine+%d (firstLine=%d, val=%d)",
+ test.name, got, test.want, firstLine, test.val)
+ }
+ }
+}
diff --git a/libgo/go/runtime/syscall_windows.go b/libgo/go/runtime/syscall_windows.go
deleted file mode 100644
index 51004b7..0000000
--- a/libgo/go/runtime/syscall_windows.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-import (
- "unsafe"
-)
-
-const _SIGPROF = 0 // dummy value for badsignal
-
-type callbacks struct {
- lock mutex
- ctxt [cb_max]*wincallbackcontext
- n int
-}
-
-func (c *wincallbackcontext) isCleanstack() bool {
- return c.cleanstack
-}
-
-func (c *wincallbackcontext) setCleanstack(cleanstack bool) {
- c.cleanstack = cleanstack
-}
-
-var (
- cbs callbacks
- cbctxts **wincallbackcontext = &cbs.ctxt[0] // to simplify access to cbs.ctxt in sys_windows_*.s
-
- callbackasm byte // type isn't really byte, it's code in runtime
-)
-
-// callbackasmAddr returns the address of the runtime.callbackasm
-// entry adjusted by i.
-// runtime.callbackasm is just a series of CALL instructions
-// (each is 5 bytes long), and we want the callback to arrive at
-// the corresponding CALL instruction instead of at the start of
-// runtime.callbackasm.
-func callbackasmAddr(i int) uintptr {
- return uintptr(add(unsafe.Pointer(&callbackasm), uintptr(i*5)))
-}
-
-func compileCallback(fn eface, cleanstack bool) (code uintptr) {
- if fn._type == nil || (fn._type.kind&kindMask) != kindFunc {
- panic("compilecallback: not a function")
- }
- ft := (*functype)(unsafe.Pointer(fn._type))
- if len(ft.out) != 1 {
- panic("compilecallback: function must have one output parameter")
- }
- uintptrSize := unsafe.Sizeof(uintptr(0))
- if t := (**_type)(unsafe.Pointer(&ft.out[0])); (*t).size != uintptrSize {
- panic("compilecallback: output parameter size is wrong")
- }
- argsize := uintptr(0)
- if len(ft.in) > 0 {
- for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] {
- if (*t).size > uintptrSize {
- panic("compilecallback: input parameter size is wrong")
- }
- argsize += uintptrSize
- }
- }
-
- lock(&cbs.lock)
- defer unlock(&cbs.lock)
-
- n := cbs.n
- for i := 0; i < n; i++ {
- if cbs.ctxt[i].gobody == fn.data && cbs.ctxt[i].isCleanstack() == cleanstack {
- return callbackasmAddr(i)
- }
- }
- if n >= cb_max {
- gothrow("too many callback functions")
- }
-
- c := new(wincallbackcontext)
- c.gobody = fn.data
- c.argsize = argsize
- c.setCleanstack(cleanstack)
- if cleanstack && argsize != 0 {
- c.restorestack = argsize
- } else {
- c.restorestack = 0
- }
- cbs.ctxt[n] = c
- cbs.n++
-
- return callbackasmAddr(n)
-}
-
-func getLoadLibrary() uintptr
-
-//go:nosplit
-func syscall_loadlibrary(filename *uint16) (handle, err uintptr) {
- var c libcall
- c.fn = getLoadLibrary()
- c.n = 1
- c.args = uintptr(unsafe.Pointer(&filename))
- cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c))
- handle = c.r1
- if handle == 0 {
- err = c.err
- }
- return
-}
-
-func getGetProcAddress() uintptr
-
-//go:nosplit
-func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) {
- var c libcall
- c.fn = getGetProcAddress()
- c.n = 2
- c.args = uintptr(unsafe.Pointer(&handle))
- cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c))
- outhandle = c.r1
- if outhandle == 0 {
- err = c.err
- }
- return
-}
-
-//go:nosplit
-func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) {
- var c libcall
- c.fn = fn
- c.n = nargs
- c.args = uintptr(unsafe.Pointer(&a1))
- cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c))
- return c.r1, c.r2, c.err
-}
-
-//go:nosplit
-func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) {
- var c libcall
- c.fn = fn
- c.n = nargs
- c.args = uintptr(unsafe.Pointer(&a1))
- cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c))
- return c.r1, c.r2, c.err
-}
-
-//go:nosplit
-func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) {
- var c libcall
- c.fn = fn
- c.n = nargs
- c.args = uintptr(unsafe.Pointer(&a1))
- cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c))
- return c.r1, c.r2, c.err
-}
-
-//go:nosplit
-func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) {
- var c libcall
- c.fn = fn
- c.n = nargs
- c.args = uintptr(unsafe.Pointer(&a1))
- cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c))
- return c.r1, c.r2, c.err
-}
-
-//go:nosplit
-func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) {
- var c libcall
- c.fn = fn
- c.n = nargs
- c.args = uintptr(unsafe.Pointer(&a1))
- cgocall_errno(unsafe.Pointer(funcPC(asmstdcall)), unsafe.Pointer(&c))
- return c.r1, c.r2, c.err
-}
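
callbackasmAddr in the deleted file above relies on runtime.callbackasm being a flat run of 5-byte CALL instructions, one per registered callback. The sketch below reproduces only the index arithmetic; callbackBase is a hypothetical stand-in for &callbackasm, not a real symbol:

package main

import "fmt"

// callbackasmAddr mirrors the arithmetic above: callback i lands on the i-th
// 5-byte CALL inside the (hypothetical) callbackasm thunk block.
func callbackasmAddr(callbackBase uintptr, i int) uintptr {
	return callbackBase + uintptr(i*5)
}

func main() {
	const base = 0x401000 // hypothetical address of runtime.callbackasm
	for i := 0; i < 4; i++ {
		fmt.Printf("callback %d -> %#x\n", i, callbackasmAddr(base, i))
	}
}
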
diff --git a/libgo/go/runtime/time.go b/libgo/go/runtime/time.go
deleted file mode 100644
index 11862c7..0000000
--- a/libgo/go/runtime/time.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Time-related runtime and pieces of package time.
-
-package runtime
-
-import "unsafe"
-
-// Package time knows the layout of this structure.
-// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
-// For GOOS=nacl, package syscall knows the layout of this structure.
-// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
-type timer struct {
- i int // heap index
-
- // Timer wakes up at when, and then at when+period, ... (period > 0 only)
- // each time calling f(now, arg) in the timer goroutine, so f must be
- // a well-behaved function and not block.
- when int64
- period int64
- f func(interface{}, uintptr)
- arg interface{}
- seq uintptr
-}
-
-var timers struct {
- lock mutex
- gp *g
- created bool
- sleeping bool
- rescheduling bool
- waitnote note
- t []*timer
-}
-
-// nacl fake time support - time in nanoseconds since 1970
-var faketime int64
-
-// Package time APIs.
-// Godoc uses the comments in package time, not these.
-
-// time.now is implemented in assembly.
-
-// Sleep puts the current goroutine to sleep for at least ns nanoseconds.
-func timeSleep(ns int64) {
- if ns <= 0 {
- return
- }
-
- t := new(timer)
- t.when = nanotime() + ns
- t.f = goroutineReady
- t.arg = getg()
- lock(&timers.lock)
- addtimerLocked(t)
- goparkunlock(&timers.lock, "sleep")
-}
-
-// startTimer adds t to the timer heap.
-func startTimer(t *timer) {
- if raceenabled {
- racerelease(unsafe.Pointer(t))
- }
- addtimer(t)
-}
-
-// stopTimer removes t from the timer heap if it is there.
-// It returns true if t was removed, false if t wasn't even there.
-func stopTimer(t *timer) bool {
- return deltimer(t)
-}
-
-// Go runtime.
-
-// Ready the goroutine arg.
-func goroutineReady(arg interface{}, seq uintptr) {
- goready(arg.(*g))
-}
-
-func addtimer(t *timer) {
- lock(&timers.lock)
- addtimerLocked(t)
- unlock(&timers.lock)
-}
-
-// Add a timer to the heap and start or kick timerproc
-// if the new timer is earlier than any of the others.
-// Timers are locked.
-func addtimerLocked(t *timer) {
- // when must never be negative; otherwise timerproc will overflow
- // during its delta calculation and never expire other runtime·timers.
- if t.when < 0 {
- t.when = 1<<63 - 1
- }
- t.i = len(timers.t)
- timers.t = append(timers.t, t)
- siftupTimer(t.i)
- if t.i == 0 {
- // siftup moved to top: new earliest deadline.
- if timers.sleeping {
- timers.sleeping = false
- notewakeup(&timers.waitnote)
- }
- if timers.rescheduling {
- timers.rescheduling = false
- goready(timers.gp)
- }
- }
- if !timers.created {
- timers.created = true
- go timerproc()
- }
-}
-
-// Delete timer t from the heap.
-// Do not need to update the timerproc: if it wakes up early, no big deal.
-func deltimer(t *timer) bool {
- // Dereference t so that any panic happens before the lock is held.
- // Discard result, because t might be moving in the heap.
- _ = t.i
-
- lock(&timers.lock)
- // t may not be registered anymore and may have
- // a bogus i (typically 0, if generated by Go).
- // Verify it before proceeding.
- i := t.i
- last := len(timers.t) - 1
- if i < 0 || i > last || timers.t[i] != t {
- unlock(&timers.lock)
- return false
- }
- if i != last {
- timers.t[i] = timers.t[last]
- timers.t[i].i = i
- }
- timers.t[last] = nil
- timers.t = timers.t[:last]
- if i != last {
- siftupTimer(i)
- siftdownTimer(i)
- }
- unlock(&timers.lock)
- return true
-}
-
-// Timerproc runs the time-driven events.
-// It sleeps until the next event in the timers heap.
-// If addtimer inserts a new earlier event, addtimer wakes timerproc early.
-func timerproc() {
- timers.gp = getg()
- timers.gp.issystem = true
- for {
- lock(&timers.lock)
- timers.sleeping = false
- now := nanotime()
- delta := int64(-1)
- for {
- if len(timers.t) == 0 {
- delta = -1
- break
- }
- t := timers.t[0]
- delta = t.when - now
- if delta > 0 {
- break
- }
- if t.period > 0 {
- // leave in heap but adjust next time to fire
- t.when += t.period * (1 + -delta/t.period)
- siftdownTimer(0)
- } else {
- // remove from heap
- last := len(timers.t) - 1
- if last > 0 {
- timers.t[0] = timers.t[last]
- timers.t[0].i = 0
- }
- timers.t[last] = nil
- timers.t = timers.t[:last]
- if last > 0 {
- siftdownTimer(0)
- }
- t.i = -1 // mark as removed
- }
- f := t.f
- arg := t.arg
- seq := t.seq
- unlock(&timers.lock)
- if raceenabled {
- raceacquire(unsafe.Pointer(t))
- }
- f(arg, seq)
- lock(&timers.lock)
- }
- if delta < 0 || faketime > 0 {
- // No timers left - put goroutine to sleep.
- timers.rescheduling = true
- goparkunlock(&timers.lock, "timer goroutine (idle)")
- continue
- }
- // At least one timer pending. Sleep until then.
- timers.sleeping = true
- noteclear(&timers.waitnote)
- unlock(&timers.lock)
- notetsleepg(&timers.waitnote, delta)
- }
-}
-
-func timejump() *g {
- if faketime == 0 {
- return nil
- }
-
- lock(&timers.lock)
- if !timers.created || len(timers.t) == 0 {
- unlock(&timers.lock)
- return nil
- }
-
- var gp *g
- if faketime < timers.t[0].when {
- faketime = timers.t[0].when
- if timers.rescheduling {
- timers.rescheduling = false
- gp = timers.gp
- }
- }
- unlock(&timers.lock)
- return gp
-}
-
-// Heap maintenance algorithms.
-
-func siftupTimer(i int) {
- t := timers.t
- when := t[i].when
- tmp := t[i]
- for i > 0 {
- p := (i - 1) / 4 // parent
- if when >= t[p].when {
- break
- }
- t[i] = t[p]
- t[i].i = i
- t[p] = tmp
- t[p].i = p
- i = p
- }
-}
-
-func siftdownTimer(i int) {
- t := timers.t
- n := len(t)
- when := t[i].when
- tmp := t[i]
- for {
- c := i*4 + 1 // left child
- c3 := c + 2 // mid child
- if c >= n {
- break
- }
- w := t[c].when
- if c+1 < n && t[c+1].when < w {
- w = t[c+1].when
- c++
- }
- if c3 < n {
- w3 := t[c3].when
- if c3+1 < n && t[c3+1].when < w3 {
- w3 = t[c3+1].when
- c3++
- }
- if w3 < w {
- w = w3
- c = c3
- }
- }
- if w >= when {
- break
- }
- t[i] = t[c]
- t[i].i = i
- t[c] = tmp
- t[c].i = c
- i = c
- }
-}
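
siftupTimer and siftdownTimer in the deleted file above maintain a 4-ary heap over timers.t: the parent of node i is (i-1)/4 and its children are i*4+1 through i*4+4. A tiny sketch of that index arithmetic only:

package main

import "fmt"

// Index arithmetic for the 4-ary timer heap maintained above.
func parent(i int) int { return (i - 1) / 4 }

func children(i int) [4]int {
	c := i*4 + 1
	return [4]int{c, c + 1, c + 2, c + 3}
}

func main() {
	fmt.Println(children(0)) // [1 2 3 4]
	fmt.Println(children(2)) // [9 10 11 12]
	for i := 1; i <= 8; i++ {
		fmt.Printf("parent(%d) = %d\n", i, parent(i))
	}
}
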
diff --git a/libgo/go/runtime/typekind.go b/libgo/go/runtime/typekind.go
deleted file mode 100644
index b64ec44..0000000
--- a/libgo/go/runtime/typekind.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime
-
-const (
- kindBool = _KindBool
- kindInt = _KindInt
- kindInt8 = _KindInt8
- kindInt16 = _KindInt16
- kindInt32 = _KindInt32
- kindInt64 = _KindInt64
- kindUint = _KindUint
- kindUint8 = _KindUint8
- kindUint16 = _KindUint16
- kindUint32 = _KindUint32
- kindUint64 = _KindUint64
- kindUintptr = _KindUintptr
- kindFloat32 = _KindFloat32
- kindFloat64 = _KindFloat64
- kindComplex64 = _KindComplex64
- kindComplex128 = _KindComplex128
- kindArray = _KindArray
- kindChan = _KindChan
- kindFunc = _KindFunc
- kindInterface = _KindInterface
- kindMap = _KindMap
- kindPtr = _KindPtr
- kindSlice = _KindSlice
- kindString = _KindString
- kindStruct = _KindStruct
- kindUnsafePointer = _KindUnsafePointer
-
- kindDirectIface = _KindDirectIface
- kindGCProg = _KindGCProg
- kindNoPointers = _KindNoPointers
- kindMask = _KindMask
-)
-
-// isDirectIface reports whether t is stored directly in an interface value.
-func isDirectIface(t *_type) bool {
- return t.kind&kindDirectIface != 0
-}
diff --git a/libgo/go/runtime/vlop_arm_test.go b/libgo/go/runtime/vlop_arm_test.go
deleted file mode 100644
index cd28419..0000000
--- a/libgo/go/runtime/vlop_arm_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package runtime_test
-
-import "testing"
-
-// arm soft division benchmarks adapted from
-// http://ridiculousfish.com/files/division_benchmarks.tar.gz
-
-const numeratorsSize = 1 << 21
-
-var numerators = randomNumerators()
-
-type randstate struct {
- hi, lo uint32
-}
-
-func (r *randstate) rand() uint32 {
- r.hi = r.hi<<16 + r.hi>>16
- r.hi += r.lo
- r.lo += r.hi
- return r.hi
-}
-
-func randomNumerators() []uint32 {
- numerators := make([]uint32, numeratorsSize)
- random := &randstate{2147483563, 2147483563 ^ 0x49616E42}
- for i := range numerators {
- numerators[i] = random.rand()
- }
- return numerators
-}
-
-func bmUint32Div(divisor uint32, b *testing.B) {
- var sum uint32
- for i := 0; i < b.N; i++ {
- sum += numerators[i&(numeratorsSize-1)] / divisor
- }
-}
-
-func BenchmarkUint32Div7(b *testing.B) { bmUint32Div(7, b) }
-func BenchmarkUint32Div37(b *testing.B) { bmUint32Div(37, b) }
-func BenchmarkUint32Div123(b *testing.B) { bmUint32Div(123, b) }
-func BenchmarkUint32Div763(b *testing.B) { bmUint32Div(763, b) }
-func BenchmarkUint32Div1247(b *testing.B) { bmUint32Div(1247, b) }
-func BenchmarkUint32Div9305(b *testing.B) { bmUint32Div(9305, b) }
-func BenchmarkUint32Div13307(b *testing.B) { bmUint32Div(13307, b) }
-func BenchmarkUint32Div52513(b *testing.B) { bmUint32Div(52513, b) }
-func BenchmarkUint32Div60978747(b *testing.B) { bmUint32Div(60978747, b) }
-func BenchmarkUint32Div106956295(b *testing.B) { bmUint32Div(106956295, b) }
-
-func bmUint32Mod(divisor uint32, b *testing.B) {
- var sum uint32
- for i := 0; i < b.N; i++ {
- sum += numerators[i&(numeratorsSize-1)] % divisor
- }
-}
-
-func BenchmarkUint32Mod7(b *testing.B) { bmUint32Mod(7, b) }
-func BenchmarkUint32Mod37(b *testing.B) { bmUint32Mod(37, b) }
-func BenchmarkUint32Mod123(b *testing.B) { bmUint32Mod(123, b) }
-func BenchmarkUint32Mod763(b *testing.B) { bmUint32Mod(763, b) }
-func BenchmarkUint32Mod1247(b *testing.B) { bmUint32Mod(1247, b) }
-func BenchmarkUint32Mod9305(b *testing.B) { bmUint32Mod(9305, b) }
-func BenchmarkUint32Mod13307(b *testing.B) { bmUint32Mod(13307, b) }
-func BenchmarkUint32Mod52513(b *testing.B) { bmUint32Mod(52513, b) }
-func BenchmarkUint32Mod60978747(b *testing.B) { bmUint32Mod(60978747, b) }
-func BenchmarkUint32Mod106956295(b *testing.B) { bmUint32Mod(106956295, b) }