Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/cgo_mmap.go | 34
-rw-r--r--  libgo/go/runtime/cgo_ppc64x.go | 12
-rw-r--r--  libgo/go/runtime/cgocheck.go | 243
-rw-r--r--  libgo/go/runtime/crash_cgo_test.go | 408
-rw-r--r--  libgo/go/runtime/crash_test.go | 432
-rw-r--r--  libgo/go/runtime/crash_unix_test.go | 30
-rw-r--r--  libgo/go/runtime/debug.go | 6
-rw-r--r--  libgo/go/runtime/debug/garbage.go | 11
-rw-r--r--  libgo/go/runtime/debug/garbage_test.go | 5
-rw-r--r--  libgo/go/runtime/debug/heapdump_test.go | 3
-rw-r--r--  libgo/go/runtime/debug/stack.go | 86
-rw-r--r--  libgo/go/runtime/debug/stack_test.go | 33
-rw-r--r--  libgo/go/runtime/defs_linux_mips64x.go | 183
-rw-r--r--  libgo/go/runtime/export_test.go | 21
-rw-r--r--  libgo/go/runtime/export_windows_test.go | 8
-rw-r--r--  libgo/go/runtime/extern.go | 45
-rw-r--r--  libgo/go/runtime/fastlog2.go | 33
-rw-r--r--  libgo/go/runtime/fastlog2_test.go | 36
-rw-r--r--  libgo/go/runtime/fastlog2table.go | 43
-rw-r--r--  libgo/go/runtime/gc_test.go | 87
-rw-r--r--  libgo/go/runtime/gcinfo_test.go | 2
-rw-r--r--  libgo/go/runtime/lfstack_linux_mips64x.go | 32
-rw-r--r--  libgo/go/runtime/malloc_test.go | 30
-rw-r--r--  libgo/go/runtime/mkfastlog2table.go | 52
-rw-r--r--  libgo/go/runtime/mmap.go | 16
-rw-r--r--  libgo/go/runtime/msan.go | 55
-rw-r--r--  libgo/go/runtime/msan/msan.go | 32
-rw-r--r--  libgo/go/runtime/msan0.go | 22
-rw-r--r--  libgo/go/runtime/mstkbar.go | 365
-rw-r--r--  libgo/go/runtime/os1_linux_generic.go | 27
-rw-r--r--  libgo/go/runtime/os1_linux_mips64x.go | 26
-rw-r--r--  libgo/go/runtime/os2_linux_generic.go | 29
-rw-r--r--  libgo/go/runtime/os2_linux_mips64x.go | 25
-rw-r--r--  libgo/go/runtime/os_android.go | 15
-rw-r--r--  libgo/go/runtime/os_linux_mips64x.go | 18
-rw-r--r--  libgo/go/runtime/pprof/mprof_test.go | 16
-rw-r--r--  libgo/go/runtime/pprof/pprof.go | 12
-rw-r--r--  libgo/go/runtime/pprof/pprof_test.go | 174
-rw-r--r--  libgo/go/runtime/print.go | 221
-rw-r--r--  libgo/go/runtime/proc_test.go | 134
-rw-r--r--  libgo/go/runtime/race/testdata/issue12225_test.go | 20
-rw-r--r--  libgo/go/runtime/race/testdata/issue12664_test.go | 76
-rw-r--r--  libgo/go/runtime/race/testdata/issue13264_test.go | 13
-rw-r--r--  libgo/go/runtime/runtime_test.go | 19
-rw-r--r--  libgo/go/runtime/signal2_unix.go | 69
-rw-r--r--  libgo/go/runtime/signal_linux_mips64x.go | 70
-rw-r--r--  libgo/go/runtime/signal_mips64x.go | 188
-rw-r--r--  libgo/go/runtime/signal_sigtramp.go | 50
-rw-r--r--  libgo/go/runtime/sigtab_linux_generic.go | 82
-rw-r--r--  libgo/go/runtime/sigtab_linux_mips64x.go | 81
-rw-r--r--  libgo/go/runtime/stack.go | 1068
-rw-r--r--  libgo/go/runtime/string_test.go | 15
-rw-r--r--  libgo/go/runtime/sys_mips64x.go | 43
-rw-r--r--  libgo/go/runtime/sys_nonppc64x.go | 10
-rw-r--r--  libgo/go/runtime/testdata/testprog/crash.go | 45
-rw-r--r--  libgo/go/runtime/testdata/testprog/deadlock.go | 173
-rw-r--r--  libgo/go/runtime/testdata/testprog/gc.go | 74
-rw-r--r--  libgo/go/runtime/testdata/testprog/main.go | 35
-rw-r--r--  libgo/go/runtime/testdata/testprog/misc.go | 15
-rw-r--r--  libgo/go/runtime/testdata/testprog/signal.go | 17
-rw-r--r--  libgo/go/runtime/testdata/testprog/stringconcat.go | 20
-rw-r--r--  libgo/go/runtime/testdata/testprog/syscall_windows.go | 27
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/callback.go | 89
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/cgo.go | 80
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/crash.go | 45
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/dll_windows.go | 25
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/dropm.go | 59
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/dropm_stub.go | 11
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/exec.go | 89
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/main.go | 35
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/threadpanic.go | 24
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/threadprof.go | 93
-rw-r--r--  libgo/go/runtime/testdata/testprogcgo/windows/win.go | 16
-rw-r--r--  libgo/go/runtime/testdata/testprognet/main.go | 35
-rw-r--r--  libgo/go/runtime/testdata/testprognet/net.go | 29
-rw-r--r--  libgo/go/runtime/testdata/testprognet/signal.go | 26
-rw-r--r--  libgo/go/runtime/write_err.go | 13
-rw-r--r--  libgo/go/runtime/write_err_android.go | 160
78 files changed, 5117 insertions(+), 984 deletions(-)
diff --git a/libgo/go/runtime/cgo_mmap.go b/libgo/go/runtime/cgo_mmap.go
new file mode 100644
index 0000000..ef5501c
--- /dev/null
+++ b/libgo/go/runtime/cgo_mmap.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Support for memory sanitizer. See runtime/cgo/mmap.go.
+
+// +build linux,amd64
+
+package runtime
+
+import "unsafe"
+
+// _cgo_mmap is filled in by runtime/cgo when it is linked into the
+// program, so it is only non-nil when using cgo.
+//go:linkname _cgo_mmap _cgo_mmap
+var _cgo_mmap unsafe.Pointer
+
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) (ret unsafe.Pointer) {
+ if _cgo_mmap != nil {
+ systemstack(func() {
+ ret = callCgoMmap(addr, n, prot, flags, fd, off)
+ })
+ return
+ }
+ return sysMmap(addr, n, prot, flags, fd, off)
+}
+
+// sysMmap calls the mmap system call. It is implemented in assembly.
+func sysMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
+
+// callCgoMmap calls the mmap function in the runtime/cgo package
+// using the GCC calling convention. It is implemented in assembly.
+func callCgoMmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
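
The dispatch above is a simple either/or: call through the C function pointer if runtime/cgo filled in _cgo_mmap at link time, otherwise issue the raw system call. A standalone sketch of the same pattern, with illustrative names rather than the runtime's:

	package main

	import "fmt"

	// overrideMmap stands in for _cgo_mmap: it stays nil unless an
	// optional component (runtime/cgo in the real code) is linked in.
	var overrideMmap func(n uintptr) uintptr

	// directMmap stands in for sysMmap, the raw system call.
	func directMmap(n uintptr) uintptr { return n }

	func doMmap(n uintptr) uintptr {
		if overrideMmap != nil {
			return overrideMmap(n) // interceptable path (e.g. for sanitizers)
		}
		return directMmap(n) // plain system-call path
	}

	func main() {
		fmt.Println(doMmap(4096)) // direct path
		overrideMmap = func(n uintptr) uintptr { return n + 1 }
		fmt.Println(doMmap(4096)) // overridden path
	}
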
diff --git a/libgo/go/runtime/cgo_ppc64x.go b/libgo/go/runtime/cgo_ppc64x.go
new file mode 100644
index 0000000..6a1b3bb
--- /dev/null
+++ b/libgo/go/runtime/cgo_ppc64x.go
@@ -0,0 +1,12 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ppc64 ppc64le
+
+package runtime
+
+// crosscall_ppc64 calls into the runtime to set up the registers the
+// Go runtime expects and so the symbol it calls needs to be exported
+// for external linking to work.
+//go:cgo_export_static _cgo_reginit
diff --git a/libgo/go/runtime/cgocheck.go b/libgo/go/runtime/cgocheck.go
new file mode 100644
index 0000000..0077e22
--- /dev/null
+++ b/libgo/go/runtime/cgocheck.go
@@ -0,0 +1,243 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code to check that pointer writes follow the cgo rules.
+// These functions are invoked via the write barrier when debug.cgocheck > 1.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const cgoWriteBarrierFail = "Go pointer stored into non-Go memory"
+
+// cgoCheckWriteBarrier is called whenever a pointer is stored into memory.
+// It throws if the program is storing a Go pointer into non-Go memory.
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckWriteBarrier(dst *uintptr, src uintptr) {
+ if !cgoIsGoPointer(unsafe.Pointer(src)) {
+ return
+ }
+ if cgoIsGoPointer(unsafe.Pointer(dst)) {
+ return
+ }
+
+ // If we are running on the system stack then dst might be an
+ // address on the stack, which is OK.
+ g := getg()
+ if g == g.m.g0 || g == g.m.gsignal {
+ return
+ }
+
+ // Allocating memory can write to various mfixalloc structs
+ // that look like they are non-Go memory.
+ if g.m.mallocing != 0 {
+ return
+ }
+
+ systemstack(func() {
+ println("write of Go pointer", hex(src), "to non-Go memory", hex(uintptr(unsafe.Pointer(dst))))
+ throw(cgoWriteBarrierFail)
+ })
+}
+
+// cgoCheckMemmove is called when moving a block of memory.
+// dst and src point off bytes into the value to copy.
+// size is the number of bytes to copy.
+// It throws if the program is copying a block that contains a Go pointer
+// into non-Go memory.
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
+ if typ.kind&kindNoPointers != 0 {
+ return
+ }
+ if !cgoIsGoPointer(src) {
+ return
+ }
+ if cgoIsGoPointer(dst) {
+ return
+ }
+ cgoCheckTypedBlock(typ, src, off, size)
+}
+
+// cgoCheckSliceCopy is called when copying n elements of a slice from
+// src to dst. typ is the element type of the slice.
+// It throws if the program is copying slice elements that contain Go pointers
+// into non-Go memory.
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckSliceCopy(typ *_type, dst, src slice, n int) {
+ if typ.kind&kindNoPointers != 0 {
+ return
+ }
+ if !cgoIsGoPointer(src.array) {
+ return
+ }
+ if cgoIsGoPointer(dst.array) {
+ return
+ }
+ p := src.array
+ for i := 0; i < n; i++ {
+ cgoCheckTypedBlock(typ, p, 0, typ.size)
+ p = add(p, typ.size)
+ }
+}
+
+// cgoCheckTypedBlock checks the block of memory at src, for up to size bytes,
+// and throws if it finds a Go pointer. The type of the memory is typ,
+// and src is off bytes into that type.
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckTypedBlock(typ *_type, src unsafe.Pointer, off, size uintptr) {
+ if typ.kind&kindGCProg == 0 {
+ cgoCheckBits(src, typ.gcdata, off, size)
+ return
+ }
+
+ // The type has a GC program. Try to find GC bits somewhere else.
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ if cgoInRange(src, datap.data, datap.edata) {
+ doff := uintptr(src) - datap.data
+ cgoCheckBits(add(src, -doff), datap.gcdatamask.bytedata, off+doff, size)
+ return
+ }
+ if cgoInRange(src, datap.bss, datap.ebss) {
+ boff := uintptr(src) - datap.bss
+ cgoCheckBits(add(src, -boff), datap.gcbssmask.bytedata, off+boff, size)
+ return
+ }
+ }
+
+ aoff := uintptr(src) - mheap_.arena_start
+ idx := aoff >> _PageShift
+ s := h_spans[idx]
+ if s.state == _MSpanStack {
+ // There are no heap bits for values stored on the stack.
+ // For a channel receive src might be on the stack of some
+ // other goroutine, so we can't unwind the stack even if
+ // we wanted to.
+ // We can't expand the GC program without extra storage
+ // space we can't easily get.
+ // Fortunately we have the type information.
+ systemstack(func() {
+ cgoCheckUsingType(typ, src, off, size)
+ })
+ return
+ }
+
+ // src must be in the regular heap.
+
+ hbits := heapBitsForAddr(uintptr(src))
+ for i := uintptr(0); i < off+size; i += sys.PtrSize {
+ bits := hbits.bits()
+ if i >= off && bits&bitPointer != 0 {
+ v := *(*unsafe.Pointer)(add(src, i))
+ if cgoIsGoPointer(v) {
+ systemstack(func() {
+ throw(cgoWriteBarrierFail)
+ })
+ }
+ }
+ hbits = hbits.next()
+ }
+}
+
+// cgoCheckBits checks the block of memory at src, for up to size
+// bytes, and throws if it finds a Go pointer. The gcbits mark each
+// pointer value. The src pointer is off bytes into the gcbits.
+//go:nosplit
+//go:nowritebarrier
+func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) {
+ skipMask := off / sys.PtrSize / 8
+ skipBytes := skipMask * sys.PtrSize * 8
+ ptrmask := addb(gcbits, skipMask)
+ src = add(src, skipBytes)
+ off -= skipBytes
+ size += off
+ var bits uint32
+ for i := uintptr(0); i < size; i += sys.PtrSize {
+ if i&(sys.PtrSize*8-1) == 0 {
+ bits = uint32(*ptrmask)
+ ptrmask = addb(ptrmask, 1)
+ } else {
+ bits >>= 1
+ }
+ if off > 0 {
+ off -= sys.PtrSize
+ } else {
+ if bits&1 != 0 {
+ v := *(*unsafe.Pointer)(add(src, i))
+ if cgoIsGoPointer(v) {
+ systemstack(func() {
+ throw(cgoWriteBarrierFail)
+ })
+ }
+ }
+ }
+ }
+}
+
+// cgoCheckUsingType is like cgoCheckTypedBlock, but is a last-ditch
+// fallback to look for pointers in src using the type information.
+// We only use this when looking at a value on the stack when the type
+// uses a GC program, because otherwise it's more efficient to use the
+// GC bits. This is called on the system stack.
+//go:nowritebarrier
+//go:systemstack
+func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) {
+ if typ.kind&kindNoPointers != 0 {
+ return
+ }
+ if typ.kind&kindGCProg == 0 {
+ cgoCheckBits(src, typ.gcdata, off, size)
+ return
+ }
+ switch typ.kind & kindMask {
+ default:
+ throw("can't happen")
+ case kindArray:
+ at := (*arraytype)(unsafe.Pointer(typ))
+ for i := uintptr(0); i < at.len; i++ {
+ if off < at.elem.size {
+ cgoCheckUsingType(at.elem, src, off, size)
+ }
+ src = add(src, at.elem.size)
+ skipped := off
+ if skipped > at.elem.size {
+ skipped = at.elem.size
+ }
+ checked := at.elem.size - skipped
+ off -= skipped
+ if size <= checked {
+ return
+ }
+ size -= checked
+ }
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(typ))
+ for _, f := range st.fields {
+ if off < f.typ.size {
+ cgoCheckUsingType(f.typ, src, off, size)
+ }
+ src = add(src, f.typ.size)
+ skipped := off
+ if skipped > f.typ.size {
+ skipped = f.typ.size
+ }
+ checked := f.typ.size - skipped
+ off -= skipped
+ if size <= checked {
+ return
+ }
+ size -= checked
+ }
+ }
+}
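
For context, this is the class of error the checks above catch: storing a Go pointer into memory the garbage collector cannot see. A minimal sketch, assuming a working cgo toolchain; run it with GODEBUG=cgocheck=2 and the write barrier check throws "Go pointer stored into non-Go memory":

	package main

	// #include <stdlib.h>
	import "C"

	import "unsafe"

	func main() {
		// One pointer-sized slot in C memory, invisible to the Go GC.
		p := (*unsafe.Pointer)(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0)))))
		defer C.free(unsafe.Pointer(p))

		x := new(int)
		*p = unsafe.Pointer(x) // Go pointer written into non-Go memory
	}
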
diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go
index 2e65e4c..d7b367f 100644
--- a/libgo/go/runtime/crash_cgo_test.go
+++ b/libgo/go/runtime/crash_cgo_test.go
@@ -21,18 +21,18 @@ func TestCgoSignalDeadlock(t *testing.T) {
if testing.Short() && runtime.GOOS == "windows" {
t.Skip("Skipping in short mode") // takes up to 64 seconds
}
- got := executeTest(t, cgoSignalDeadlockSource, nil)
+ got := runTestProg(t, "testprogcgo", "CgoSignalDeadlock")
want := "OK\n"
if got != want {
- t.Fatalf("expected %q, but got %q", want, got)
+ t.Fatalf("expected %q, but got:\n%s", want, got)
}
}
func TestCgoTraceback(t *testing.T) {
- got := executeTest(t, cgoTracebackSource, nil)
+ got := runTestProg(t, "testprogcgo", "CgoTraceback")
want := "OK\n"
if got != want {
- t.Fatalf("expected %q, but got %q", want, got)
+ t.Fatalf("expected %q, but got:\n%s", want, got)
}
}
@@ -40,13 +40,18 @@ func TestCgoCallbackGC(t *testing.T) {
if runtime.GOOS == "plan9" || runtime.GOOS == "windows" {
t.Skipf("no pthreads on %s", runtime.GOOS)
}
- if testing.Short() && runtime.GOOS == "dragonfly" {
- t.Skip("see golang.org/issue/11990")
+ if testing.Short() {
+ switch {
+ case runtime.GOOS == "dragonfly":
+ t.Skip("see golang.org/issue/11990")
+ case runtime.GOOS == "linux" && runtime.GOARCH == "arm":
+ t.Skip("too slow for arm builders")
+ }
}
- got := executeTest(t, cgoCallbackGCSource, nil)
+ got := runTestProg(t, "testprogcgo", "CgoCallbackGC")
want := "OK\n"
if got != want {
- t.Fatalf("expected %q, but got %q", want, got)
+ t.Fatalf("expected %q, but got:\n%s", want, got)
}
}
@@ -54,11 +59,7 @@ func TestCgoExternalThreadPanic(t *testing.T) {
if runtime.GOOS == "plan9" {
t.Skipf("no pthreads on %s", runtime.GOOS)
}
- csrc := cgoExternalThreadPanicC
- if runtime.GOOS == "windows" {
- csrc = cgoExternalThreadPanicC_windows
- }
- got := executeTest(t, cgoExternalThreadPanicSource, nil, "main.c", csrc)
+ got := runTestProg(t, "testprogcgo", "CgoExternalThreadPanic")
want := "panic: BOOM"
if !strings.Contains(got, want) {
t.Fatalf("want failure containing %q. output:\n%s\n", want, got)
@@ -84,15 +85,15 @@ func TestCgoExternalThreadSIGPROF(t *testing.T) {
}
}
}
- if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" {
+ if runtime.GOARCH == "ppc64" {
// TODO(austin) External linking not implemented on
// ppc64 (issue #8912)
t.Skipf("no external linking on ppc64")
}
- got := executeTest(t, cgoExternalThreadSIGPROFSource, nil)
+ got := runTestProg(t, "testprogcgo", "CgoExternalThreadSIGPROF")
want := "OK\n"
if got != want {
- t.Fatalf("expected %q, but got %q", want, got)
+ t.Fatalf("expected %q, but got:\n%s", want, got)
}
}
@@ -102,10 +103,10 @@ func TestCgoExternalThreadSignal(t *testing.T) {
case "plan9", "windows":
t.Skipf("no pthreads on %s", runtime.GOOS)
}
- got := executeTest(t, cgoExternalThreadSignalSource, nil)
+ got := runTestProg(t, "testprogcgo", "CgoExternalThreadSignal")
want := "OK\n"
if got != want {
- t.Fatalf("expected %q, but got %q", want, got)
+ t.Fatalf("expected %q, but got:\n%s", want, got)
}
}
@@ -114,368 +115,35 @@ func TestCgoDLLImports(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skip("skipping windows specific test")
}
- got := executeTest(t, cgoDLLImportsMainSource, nil, "a/a.go", cgoDLLImportsPkgSource)
+ got := runTestProg(t, "testprogcgo", "CgoDLLImportsMain")
want := "OK\n"
if got != want {
t.Fatalf("expected %q, but got %v", want, got)
}
}
-const cgoSignalDeadlockSource = `
-package main
-
-import "C"
-
-import (
- "fmt"
- "runtime"
- "time"
-)
-
-func main() {
- runtime.GOMAXPROCS(100)
- ping := make(chan bool)
- go func() {
- for i := 0; ; i++ {
- runtime.Gosched()
- select {
- case done := <-ping:
- if done {
- ping <- true
- return
- }
- ping <- true
- default:
- }
- func() {
- defer func() {
- recover()
- }()
- var s *string
- *s = ""
- }()
- }
- }()
- time.Sleep(time.Millisecond)
- for i := 0; i < 64; i++ {
- go func() {
- runtime.LockOSThread()
- select {}
- }()
- go func() {
- runtime.LockOSThread()
- select {}
- }()
- time.Sleep(time.Millisecond)
- ping <- false
- select {
- case <-ping:
- case <-time.After(time.Second):
- fmt.Printf("HANG\n")
- return
- }
- }
- ping <- true
- select {
- case <-ping:
- case <-time.After(time.Second):
- fmt.Printf("HANG\n")
- return
- }
- fmt.Printf("OK\n")
-}
-`
-
-const cgoTracebackSource = `
-package main
-
-/* void foo(void) {} */
-import "C"
-
-import (
- "fmt"
- "runtime"
-)
-
-func main() {
- C.foo()
- buf := make([]byte, 1)
- runtime.Stack(buf, true)
- fmt.Printf("OK\n")
-}
-`
-
-const cgoCallbackGCSource = `
-package main
-
-import "runtime"
-
-/*
-#include <pthread.h>
-
-void go_callback();
-
-static void *thr(void *arg) {
- go_callback();
- return 0;
-}
-
-static void foo() {
- pthread_t th;
- pthread_create(&th, 0, thr, 0);
- pthread_join(th, 0);
-}
-*/
-import "C"
-import "fmt"
-
-//export go_callback
-func go_callback() {
- runtime.GC()
- grow()
- runtime.GC()
-}
-
-var cnt int
-
-func grow() {
- x := 10000
- sum := 0
- if grow1(&x, &sum) == 0 {
- panic("bad")
- }
-}
-
-func grow1(x, sum *int) int {
- if *x == 0 {
- return *sum + 1
- }
- *x--
- sum1 := *sum + *x
- return grow1(x, &sum1)
-}
-
-func main() {
- const P = 100
- done := make(chan bool)
- // allocate a bunch of stack frames and spray them with pointers
- for i := 0; i < P; i++ {
- go func() {
- grow()
- done <- true
- }()
- }
- for i := 0; i < P; i++ {
- <-done
- }
- // now give these stack frames to cgo callbacks
- for i := 0; i < P; i++ {
- go func() {
- C.foo()
- done <- true
- }()
- }
- for i := 0; i < P; i++ {
- <-done
+func TestCgoExecSignalMask(t *testing.T) {
+ // Test issue 13164.
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ t.Skipf("skipping signal mask test on %s", runtime.GOOS)
}
- fmt.Printf("OK\n")
-}
-`
-
-const cgoExternalThreadPanicSource = `
-package main
-
-// void start(void);
-import "C"
-
-func main() {
- C.start()
- select {}
-}
-
-//export gopanic
-func gopanic() {
- panic("BOOM")
-}
-`
-
-const cgoExternalThreadPanicC = `
-#include <stdlib.h>
-#include <stdio.h>
-#include <pthread.h>
-
-void gopanic(void);
-
-static void*
-die(void* x)
-{
- gopanic();
- return 0;
-}
-
-void
-start(void)
-{
- pthread_t t;
- if(pthread_create(&t, 0, die, 0) != 0)
- printf("pthread_create failed\n");
-}
-`
-
-const cgoExternalThreadPanicC_windows = `
-#include <stdlib.h>
-#include <stdio.h>
-
-void gopanic(void);
-
-static void*
-die(void* x)
-{
- gopanic();
- return 0;
-}
-
-void
-start(void)
-{
- if(_beginthreadex(0, 0, die, 0, 0, 0) != 0)
- printf("_beginthreadex failed\n");
-}
-`
-
-const cgoExternalThreadSIGPROFSource = `
-package main
-
-/*
-#include <stdint.h>
-#include <signal.h>
-#include <pthread.h>
-
-volatile int32_t spinlock;
-
-static void *thread1(void *p) {
- (void)p;
- while (spinlock == 0)
- ;
- pthread_kill(pthread_self(), SIGPROF);
- spinlock = 0;
- return NULL;
-}
-__attribute__((constructor)) void issue9456() {
- pthread_t tid;
- pthread_create(&tid, 0, thread1, NULL);
-}
-*/
-import "C"
-
-import (
- "runtime"
- "sync/atomic"
- "unsafe"
-)
-
-func main() {
- // This test intends to test that sending SIGPROF to foreign threads
- // before we make any cgo call will not abort the whole process, so
- // we cannot make any cgo call here. See https://golang.org/issue/9456.
- atomic.StoreInt32((*int32)(unsafe.Pointer(&C.spinlock)), 1)
- for atomic.LoadInt32((*int32)(unsafe.Pointer(&C.spinlock))) == 1 {
- runtime.Gosched()
+ got := runTestProg(t, "testprogcgo", "CgoExecSignalMask")
+ want := "OK\n"
+ if got != want {
+ t.Errorf("expected %q, got %v", want, got)
}
- println("OK")
-}
-`
-
-const cgoExternalThreadSignalSource = `
-package main
-
-/*
-#include <pthread.h>
-
-void **nullptr;
-
-void *crash(void *p) {
- *nullptr = p;
- return 0;
}
-int start_crashing_thread(void) {
- pthread_t tid;
- return pthread_create(&tid, 0, crash, 0);
-}
-*/
-import "C"
-
-import (
- "fmt"
- "os"
- "os/exec"
- "time"
-)
-
-func main() {
- if len(os.Args) > 1 && os.Args[1] == "crash" {
- i := C.start_crashing_thread()
- if i != 0 {
- fmt.Println("pthread_create failed:", i)
- // Exit with 0 because parent expects us to crash.
- return
- }
-
- // We should crash immediately, but give it plenty of
- // time before failing (by exiting 0) in case we are
- // running on a slow system.
- time.Sleep(5 * time.Second)
- return
+func TestEnsureDropM(t *testing.T) {
+ // Test for issue 13881.
+ switch runtime.GOOS {
+ case "windows", "plan9":
+ t.Skipf("skipping dropm test on %s", runtime.GOOS)
}
-
- out, err := exec.Command(os.Args[0], "crash").CombinedOutput()
- if err == nil {
- fmt.Println("C signal did not crash as expected\n")
- fmt.Printf("%s\n", out)
- os.Exit(1)
+ got := runTestProg(t, "testprogcgo", "EnsureDropM")
+ want := "OK\n"
+ if got != want {
+ t.Errorf("expected %q, got %v", want, got)
}
-
- fmt.Println("OK")
-}
-`
-
-const cgoDLLImportsMainSource = `
-package main
-
-/*
-#include <windows.h>
-
-DWORD getthread() {
- return GetCurrentThreadId();
-}
-*/
-import "C"
-
-import "./a"
-
-func main() {
- C.getthread()
- a.GetThread()
- println("OK")
-}
-`
-
-const cgoDLLImportsPkgSource = `
-package a
-
-/*
-#cgo CFLAGS: -mnop-fun-dllimport
-
-#include <windows.h>
-
-DWORD agetthread() {
- return GetCurrentThreadId();
-}
-*/
-import "C"
-
-func GetThread() uint32 {
- return uint32(C.agetthread())
}
-`
diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go
index 8efce4d..b622eb4 100644
--- a/libgo/go/runtime/crash_test.go
+++ b/libgo/go/runtime/crash_test.go
@@ -16,9 +16,18 @@ import (
"strings"
"sync"
"testing"
- "text/template"
)
+var toRemove []string
+
+func TestMain(m *testing.M) {
+ status := m.Run()
+ for _, file := range toRemove {
+ os.RemoveAll(file)
+ }
+ os.Exit(status)
+}
+
func testEnv(cmd *exec.Cmd) *exec.Cmd {
if cmd.Env != nil {
panic("environment already set")
@@ -38,55 +47,63 @@ func testEnv(cmd *exec.Cmd) *exec.Cmd {
return cmd
}
-func executeTest(t *testing.T, templ string, data interface{}, extra ...string) string {
- testenv.MustHaveGoBuild(t)
+var testprog struct {
+ sync.Mutex
+ dir string
+ target map[string]buildexe
+}
- checkStaleRuntime(t)
+type buildexe struct {
+ exe string
+ err error
+}
- st := template.Must(template.New("crashSource").Parse(templ))
+func runTestProg(t *testing.T, binary, name string) string {
+ testenv.MustHaveGoBuild(t)
- dir, err := ioutil.TempDir("", "go-build")
+ exe, err := buildTestProg(t, binary)
if err != nil {
- t.Fatalf("failed to create temp directory: %v", err)
+ t.Fatal(err)
}
- defer os.RemoveAll(dir)
+ got, _ := testEnv(exec.Command(exe, name)).CombinedOutput()
+ return string(got)
+}
- src := filepath.Join(dir, "main.go")
- f, err := os.Create(src)
- if err != nil {
- t.Fatalf("failed to create file: %v", err)
- }
- err = st.Execute(f, data)
- if err != nil {
- f.Close()
- t.Fatalf("failed to execute template: %v", err)
- }
- if err := f.Close(); err != nil {
- t.Fatalf("failed to close file: %v", err)
- }
+func buildTestProg(t *testing.T, binary string) (string, error) {
+ checkStaleRuntime(t)
- for i := 0; i < len(extra); i += 2 {
- fname := extra[i]
- contents := extra[i+1]
- if d, _ := filepath.Split(fname); d != "" {
- if err := os.Mkdir(filepath.Join(dir, d), 0755); err != nil {
- t.Fatal(err)
- }
- }
- if err := ioutil.WriteFile(filepath.Join(dir, fname), []byte(contents), 0666); err != nil {
- t.Fatal(err)
+ testprog.Lock()
+ defer testprog.Unlock()
+ if testprog.dir == "" {
+ dir, err := ioutil.TempDir("", "go-build")
+ if err != nil {
+ t.Fatalf("failed to create temp directory: %v", err)
}
+ testprog.dir = dir
+ toRemove = append(toRemove, dir)
+ }
+
+ if testprog.target == nil {
+ testprog.target = make(map[string]buildexe)
+ }
+ target, ok := testprog.target[binary]
+ if ok {
+ return target.exe, target.err
}
- cmd := exec.Command("go", "build", "-o", "a.exe")
- cmd.Dir = dir
+ exe := filepath.Join(testprog.dir, binary+".exe")
+ cmd := exec.Command("go", "build", "-o", exe)
+ cmd.Dir = "testdata/" + binary
out, err := testEnv(cmd).CombinedOutput()
if err != nil {
- t.Fatalf("building source: %v\n%s", err, out)
+ exe = ""
+ target.err = fmt.Errorf("building %s: %v\n%s", binary, err, out)
+ testprog.target[binary] = target
+ return "", target.err
}
-
- got, _ := testEnv(exec.Command(filepath.Join(dir, "a.exe"))).CombinedOutput()
- return string(got)
+ target.exe = exe
+ testprog.target[binary] = target
+ return exe, nil
}
var (
@@ -115,7 +132,12 @@ func testCrashHandler(t *testing.T, cgo bool) {
type crashTest struct {
Cgo bool
}
- output := executeTest(t, crashSource, &crashTest{Cgo: cgo})
+ var output string
+ if cgo {
+ output = runTestProg(t, "testprogcgo", "Crash")
+ } else {
+ output = runTestProg(t, "testprog", "Crash")
+ }
want := "main: recovered done\nnew-thread: recovered done\nsecond-new-thread: recovered done\nmain-again: recovered done\n"
if output != want {
t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want)
@@ -126,8 +148,8 @@ func TestCrashHandler(t *testing.T) {
testCrashHandler(t, false)
}
-func testDeadlock(t *testing.T, source string) {
- output := executeTest(t, source, nil)
+func testDeadlock(t *testing.T, name string) {
+ output := runTestProg(t, "testprog", name)
want := "fatal error: all goroutines are asleep - deadlock!\n"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
@@ -135,23 +157,23 @@ func testDeadlock(t *testing.T, source string) {
}
func TestSimpleDeadlock(t *testing.T) {
- testDeadlock(t, simpleDeadlockSource)
+ testDeadlock(t, "SimpleDeadlock")
}
func TestInitDeadlock(t *testing.T) {
- testDeadlock(t, initDeadlockSource)
+ testDeadlock(t, "InitDeadlock")
}
func TestLockedDeadlock(t *testing.T) {
- testDeadlock(t, lockedDeadlockSource)
+ testDeadlock(t, "LockedDeadlock")
}
func TestLockedDeadlock2(t *testing.T) {
- testDeadlock(t, lockedDeadlockSource2)
+ testDeadlock(t, "LockedDeadlock2")
}
func TestGoexitDeadlock(t *testing.T) {
- output := executeTest(t, goexitDeadlockSource, nil)
+ output := runTestProg(t, "testprog", "GoexitDeadlock")
want := "no goroutines (main called runtime.Goexit) - deadlock!"
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
@@ -159,15 +181,15 @@ func TestGoexitDeadlock(t *testing.T) {
}
func TestStackOverflow(t *testing.T) {
- output := executeTest(t, stackOverflowSource, nil)
- want := "runtime: goroutine stack exceeds 4194304-byte limit\nfatal error: stack overflow"
+ output := runTestProg(t, "testprog", "StackOverflow")
+ want := "runtime: goroutine stack exceeds 1474560-byte limit\nfatal error: stack overflow"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
func TestThreadExhaustion(t *testing.T) {
- output := executeTest(t, threadExhaustionSource, nil)
+ output := runTestProg(t, "testprog", "ThreadExhaustion")
want := "runtime: program exceeds 10-thread limit\nfatal error: thread exhaustion"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
@@ -175,7 +197,7 @@ func TestThreadExhaustion(t *testing.T) {
}
func TestRecursivePanic(t *testing.T) {
- output := executeTest(t, recursivePanicSource, nil)
+ output := runTestProg(t, "testprog", "RecursivePanic")
want := `wrap: bad
panic: again
@@ -187,7 +209,7 @@ panic: again
}
func TestGoexitCrash(t *testing.T) {
- output := executeTest(t, goexitExitSource, nil)
+ output := runTestProg(t, "testprog", "GoexitExit")
want := "no goroutines (main called runtime.Goexit) - deadlock!"
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
@@ -211,15 +233,15 @@ func TestGoexitDefer(t *testing.T) {
}
func TestGoNil(t *testing.T) {
- output := executeTest(t, goNilSource, nil)
+ output := runTestProg(t, "testprog", "GoNil")
want := "go of nil func value"
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
-func TestMainGoroutineId(t *testing.T) {
- output := executeTest(t, mainGoroutineIdSource, nil)
+func TestMainGoroutineID(t *testing.T) {
+ output := runTestProg(t, "testprog", "MainGoroutineID")
want := "panic: test\n\ngoroutine 1 [running]:\n"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
@@ -227,7 +249,7 @@ func TestMainGoroutineId(t *testing.T) {
}
func TestNoHelperGoroutines(t *testing.T) {
- output := executeTest(t, noHelperGoroutinesSource, nil)
+ output := runTestProg(t, "testprog", "NoHelperGoroutines")
matches := regexp.MustCompile(`goroutine [0-9]+ \[`).FindAllStringSubmatch(output, -1)
if len(matches) != 1 || matches[0][0] != "goroutine 1 [" {
t.Fatalf("want to see only goroutine 1, see:\n%s", output)
@@ -235,311 +257,39 @@ func TestNoHelperGoroutines(t *testing.T) {
}
func TestBreakpoint(t *testing.T) {
- output := executeTest(t, breakpointSource, nil)
+ output := runTestProg(t, "testprog", "Breakpoint")
want := "runtime.Breakpoint()"
if !strings.Contains(output, want) {
t.Fatalf("output:\n%s\n\nwant output containing: %s", output, want)
}
}
-const crashSource = `
-package main
-
-import (
- "fmt"
- "runtime"
-)
-
-{{if .Cgo}}
-import "C"
-{{end}}
-
-func test(name string) {
- defer func() {
- if x := recover(); x != nil {
- fmt.Printf(" recovered")
- }
- fmt.Printf(" done\n")
- }()
- fmt.Printf("%s:", name)
- var s *string
- _ = *s
- fmt.Print("SHOULD NOT BE HERE")
-}
-
-func testInNewThread(name string) {
- c := make(chan bool)
- go func() {
- runtime.LockOSThread()
- test(name)
- c <- true
- }()
- <-c
-}
-
-func main() {
- runtime.LockOSThread()
- test("main")
- testInNewThread("new-thread")
- testInNewThread("second-new-thread")
- test("main-again")
-}
-`
-
-const simpleDeadlockSource = `
-package main
-func main() {
- select {}
-}
-`
-
-const initDeadlockSource = `
-package main
-func init() {
- select {}
-}
-func main() {
-}
-`
-
-const lockedDeadlockSource = `
-package main
-import "runtime"
-func main() {
- runtime.LockOSThread()
- select {}
-}
-`
-
-const lockedDeadlockSource2 = `
-package main
-import (
- "runtime"
- "time"
-)
-func main() {
- go func() {
- runtime.LockOSThread()
- select {}
- }()
- time.Sleep(time.Millisecond)
- select {}
-}
-`
-
-const goexitDeadlockSource = `
-package main
-import (
- "runtime"
-)
-
-func F() {
- for i := 0; i < 10; i++ {
- }
-}
-
-func main() {
- go F()
- go F()
- runtime.Goexit()
-}
-`
-
-const stackOverflowSource = `
-package main
-
-import "runtime/debug"
-
-func main() {
- debug.SetMaxStack(4<<20)
- f(make([]byte, 10))
-}
-
-func f(x []byte) byte {
- var buf [64<<10]byte
- return x[0] + f(buf[:])
-}
-`
-
-const threadExhaustionSource = `
-package main
-
-import (
- "runtime"
- "runtime/debug"
-)
-
-func main() {
- debug.SetMaxThreads(10)
- c := make(chan int)
- for i := 0; i < 100; i++ {
- go func() {
- runtime.LockOSThread()
- c <- 0
- select{}
- }()
- <-c
- }
-}
-`
-
-const recursivePanicSource = `
-package main
-
-import (
- "fmt"
-)
-
-func main() {
- func() {
- defer func() {
- fmt.Println(recover())
- }()
- var x [8192]byte
- func(x [8192]byte) {
- defer func() {
- if err := recover(); err != nil {
- panic("wrap: " + err.(string))
- }
- }()
- panic("bad")
- }(x)
- }()
- panic("again")
-}
-`
-
-const goexitExitSource = `
-package main
-
-import (
- "runtime"
- "time"
-)
-
-func main() {
- go func() {
- time.Sleep(time.Millisecond)
- }()
- i := 0
- runtime.SetFinalizer(&i, func(p *int) {})
- runtime.GC()
- runtime.Goexit()
-}
-`
-
-const goNilSource = `
-package main
-
-func main() {
- defer func() {
- recover()
- }()
- var f func()
- go f()
- select{}
-}
-`
-
-const mainGoroutineIdSource = `
-package main
-func main() {
- panic("test")
-}
-`
-
-const noHelperGoroutinesSource = `
-package main
-import (
- "runtime"
- "time"
-)
-func init() {
- i := 0
- runtime.SetFinalizer(&i, func(p *int) {})
- time.AfterFunc(time.Hour, func() {})
- panic("oops")
-}
-func main() {
-}
-`
-
-const breakpointSource = `
-package main
-import "runtime"
-func main() {
- runtime.Breakpoint()
-}
-`
-
func TestGoexitInPanic(t *testing.T) {
// see issue 8774: this code used to trigger an infinite recursion
- output := executeTest(t, goexitInPanicSource, nil)
+ output := runTestProg(t, "testprog", "GoexitInPanic")
want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
-const goexitInPanicSource = `
-package main
-import "runtime"
-func main() {
- go func() {
- defer func() {
- runtime.Goexit()
- }()
- panic("hello")
- }()
- runtime.Goexit()
-}
-`
-
func TestPanicAfterGoexit(t *testing.T) {
// an uncaught panic should still work after goexit
- output := executeTest(t, panicAfterGoexitSource, nil)
+ output := runTestProg(t, "testprog", "PanicAfterGoexit")
want := "panic: hello"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
-const panicAfterGoexitSource = `
-package main
-import "runtime"
-func main() {
- defer func() {
- panic("hello")
- }()
- runtime.Goexit()
-}
-`
-
func TestRecoveredPanicAfterGoexit(t *testing.T) {
- output := executeTest(t, recoveredPanicAfterGoexitSource, nil)
+ output := runTestProg(t, "testprog", "RecoveredPanicAfterGoexit")
want := "fatal error: no goroutines (main called runtime.Goexit) - deadlock!"
if !strings.HasPrefix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
-const recoveredPanicAfterGoexitSource = `
-package main
-import "runtime"
-func main() {
- defer func() {
- defer func() {
- r := recover()
- if r == nil {
- panic("bad recover")
- }
- }()
- panic("hello")
- }()
- runtime.Goexit()
-}
-`
-
func TestRecoverBeforePanicAfterGoexit(t *testing.T) {
// 1. defer a function that recovers
// 2. defer a function that panics
@@ -561,29 +311,9 @@ func TestRecoverBeforePanicAfterGoexit(t *testing.T) {
}
func TestNetpollDeadlock(t *testing.T) {
- output := executeTest(t, netpollDeadlockSource, nil)
+ output := runTestProg(t, "testprognet", "NetpollDeadlock")
want := "done\n"
if !strings.HasSuffix(output, want) {
t.Fatalf("output does not start with %q:\n%s", want, output)
}
}
-
-const netpollDeadlockSource = `
-package main
-import (
- "fmt"
- "net"
-)
-func init() {
- fmt.Println("dialing")
- c, err := net.Dial("tcp", "localhost:14356")
- if err == nil {
- c.Close()
- } else {
- fmt.Println("error: ", err)
- }
-}
-func main() {
- fmt.Println("done")
-}
-`
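
The testdata binaries that runTestProg builds all follow the same shape: each case is a named function registered at init time, and main dispatches on os.Args[1]. A self-contained sketch of that registration pattern (the real helpers live in testdata/testprog/main.go and its siblings):

	package main

	import (
		"fmt"
		"os"
	)

	var cmds = map[string]func(){}

	func register(name string, f func()) {
		if cmds[name] != nil {
			panic("duplicate registration: " + name)
		}
		cmds[name] = f
	}

	func init() {
		register("Example", func() { fmt.Println("OK") })
	}

	func main() {
		if len(os.Args) < 2 {
			println("usage: prog name")
			os.Exit(2)
		}
		f := cmds[os.Args[1]]
		if f == nil {
			println("unknown function: " + os.Args[1])
			os.Exit(2)
		}
		f()
	}
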
diff --git a/libgo/go/runtime/crash_unix_test.go b/libgo/go/runtime/crash_unix_test.go
index b925d02..5284a37 100644
--- a/libgo/go/runtime/crash_unix_test.go
+++ b/libgo/go/runtime/crash_unix_test.go
@@ -133,3 +133,33 @@ func loop(i int, c chan bool) {
}
}
`
+
+func TestSignalExitStatus(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ switch runtime.GOOS {
+ case "netbsd":
+ t.Skip("skipping on NetBSD; see https://golang.org/issue/14063")
+ }
+ exe, err := buildTestProg(t, "testprog")
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = testEnv(exec.Command(exe, "SignalExitStatus")).Run()
+ if err == nil {
+ t.Error("test program succeeded unexpectedly")
+ } else if ee, ok := err.(*exec.ExitError); !ok {
+ t.Errorf("error (%v) has type %T; expected exec.ExitError", err, err)
+ } else if ws, ok := ee.Sys().(syscall.WaitStatus); !ok {
+ t.Errorf("error.Sys (%v) has type %T; expected syscall.WaitStatus", ee.Sys(), ee.Sys())
+ } else if !ws.Signaled() || ws.Signal() != syscall.SIGTERM {
+ t.Errorf("got %v; expected SIGTERM", ee)
+ }
+}
+
+func TestSignalIgnoreSIGTRAP(t *testing.T) {
+ output := runTestProg(t, "testprognet", "SignalIgnoreSIGTRAP")
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("want %s, got %s\n", want, output)
+ }
+}
diff --git a/libgo/go/runtime/debug.go b/libgo/go/runtime/debug.go
index bcdde4b..0c915a2 100644
--- a/libgo/go/runtime/debug.go
+++ b/libgo/go/runtime/debug.go
@@ -23,7 +23,11 @@ func UnlockOSThread()
// This call will go away when the scheduler improves.
func GOMAXPROCS(n int) int
-// NumCPU returns the number of logical CPUs on the local machine.
+// NumCPU returns the number of logical CPUs usable by the current process.
+//
+// The set of available CPUs is checked by querying the operating system
+// at process startup. Changes to operating system CPU allocation after
+// process startup are not reflected.
func NumCPU() int
// NumCgoCall returns the number of cgo calls made by the current process.
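
A one-line check of the documented behavior — the count is sampled from the operating system once, at startup:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		// Fixed at process startup; later changes to the CPUs
		// allocated to the process are not reflected.
		fmt.Println("usable CPUs:", runtime.NumCPU())
	}
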
diff --git a/libgo/go/runtime/debug/garbage.go b/libgo/go/runtime/debug/garbage.go
index c3363f9..0f8a44c 100644
--- a/libgo/go/runtime/debug/garbage.go
+++ b/libgo/go/runtime/debug/garbage.go
@@ -151,3 +151,14 @@ func SetPanicOnFault(enabled bool) bool
// it to the given file descriptor.
// The heap dump format is defined at https://golang.org/s/go13heapdump.
func WriteHeapDump(fd uintptr)
+
+// SetTraceback sets the amount of detail printed by the runtime in
+// the traceback it prints before exiting due to an unrecovered panic
+// or an internal runtime error.
+// The level argument takes the same values as the GOTRACEBACK
+// environment variable. For example, SetTraceback("all") ensures
+// that the program prints all goroutines when it crashes.
+// See the package runtime documentation for details.
+// If SetTraceback is called with a level lower than that of the
+// environment variable, the call is ignored.
+func SetTraceback(level string)
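
Usage is a single call taking the same values as GOTRACEBACK; a minimal sketch (the call is ignored if it asks for less detail than the environment already grants):

	package main

	import "runtime/debug"

	func main() {
		debug.SetTraceback("all") // same values as GOTRACEBACK
		panic("boom")             // the traceback now includes all goroutines
	}
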
diff --git a/libgo/go/runtime/debug/garbage_test.go b/libgo/go/runtime/debug/garbage_test.go
index 13e1845..21bf6eb 100644
--- a/libgo/go/runtime/debug/garbage_test.go
+++ b/libgo/go/runtime/debug/garbage_test.go
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package debug
+package debug_test
import (
"runtime"
+ . "runtime/debug"
"testing"
"time"
)
@@ -75,7 +76,7 @@ func TestReadGCStats(t *testing.T) {
var big = make([]byte, 1<<20)
func TestFreeOSMemory(t *testing.T) {
- if runtime.GOARCH == "arm64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" ||
+ if runtime.GOARCH == "arm64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "mips64" || runtime.GOARCH == "mips64le" ||
runtime.GOOS == "nacl" {
t.Skip("issue 9993; scavenger temporarily disabled on systems with physical pages larger than logical pages")
}
diff --git a/libgo/go/runtime/debug/heapdump_test.go b/libgo/go/runtime/debug/heapdump_test.go
index cb2f2f0..5761c01 100644
--- a/libgo/go/runtime/debug/heapdump_test.go
+++ b/libgo/go/runtime/debug/heapdump_test.go
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package debug
+package debug_test
import (
"io/ioutil"
"os"
"runtime"
+ . "runtime/debug"
"testing"
)
diff --git a/libgo/go/runtime/debug/stack.go b/libgo/go/runtime/debug/stack.go
index ab12bff..5d810af 100644
--- a/libgo/go/runtime/debug/stack.go
+++ b/libgo/go/runtime/debug/stack.go
@@ -7,92 +7,24 @@
package debug
import (
- "bytes"
- "fmt"
- "io/ioutil"
"os"
"runtime"
)
-var (
- dunno = []byte("???")
- centerDot = []byte("·")
- dot = []byte(".")
- slash = []byte("/")
-)
-
-// PrintStack prints to standard error the stack trace returned by Stack.
+// PrintStack prints to standard error the stack trace returned by runtime.Stack.
func PrintStack() {
- os.Stderr.Write(stack())
+ os.Stderr.Write(Stack())
}
// Stack returns a formatted stack trace of the goroutine that calls it.
-// For each routine, it includes the source line information and PC value,
-// then attempts to discover, for Go functions, the calling function or
-// method and the text of the line containing the invocation.
-//
-// Deprecated: Use package runtime's Stack instead.
+// It calls runtime.Stack with a large enough buffer to capture the entire trace.
func Stack() []byte {
- return stack()
-}
-
-// stack implements Stack, skipping 2 frames
-func stack() []byte {
- buf := new(bytes.Buffer) // the returned data
- // As we loop, we open files and read them. These variables record the currently
- // loaded file.
- var lines [][]byte
- var lastFile string
- for i := 2; ; i++ { // Caller we care about is the user, 2 frames up
- pc, file, line, ok := runtime.Caller(i)
- if !ok {
- break
+ buf := make([]byte, 1024)
+ for {
+ n := runtime.Stack(buf, false)
+ if n < len(buf) {
+ return buf[:n]
}
- // Print this much at least. If we can't find the source, it won't show.
- fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
- if file != lastFile {
- data, err := ioutil.ReadFile(file)
- if err != nil {
- continue
- }
- lines = bytes.Split(data, []byte{'\n'})
- lastFile = file
- }
- line-- // in stack trace, lines are 1-indexed but our array is 0-indexed
- fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
- }
- return buf.Bytes()
-}
-
-// source returns a space-trimmed slice of the n'th line.
-func source(lines [][]byte, n int) []byte {
- if n < 0 || n >= len(lines) {
- return dunno
- }
- return bytes.Trim(lines[n], " \t")
-}
-
-// function returns, if possible, the name of the function containing the PC.
-func function(pc uintptr) []byte {
- fn := runtime.FuncForPC(pc)
- if fn == nil {
- return dunno
- }
- name := []byte(fn.Name())
- // The name includes the path name to the package, which is unnecessary
- // since the file name is already included. Plus, it has center dots.
- // That is, we see
- // runtime/debug.*T·ptrmethod
- // and want
- // *T.ptrmethod
- // Since the package path might contains dots (e.g. code.google.com/...),
- // we first remove the path prefix if there is one.
- if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
- name = name[lastslash+1:]
- }
- if period := bytes.Index(name, dot); period >= 0 {
- name = name[period+1:]
+ buf = make([]byte, 2*len(buf))
}
- name = bytes.Replace(name, centerDot, dot, -1)
- return name
}
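
The rewritten Stack simply retries runtime.Stack with a doubled buffer until the whole trace fits, so callers keep the old one-line usage:

	package main

	import (
		"fmt"
		"runtime/debug"
	)

	func main() {
		fmt.Printf("%s", debug.Stack()) // full trace of this goroutine
		debug.PrintStack()              // the same trace, on standard error
	}
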
diff --git a/libgo/go/runtime/debug/stack_test.go b/libgo/go/runtime/debug/stack_test.go
index 263d715..0f769ee 100644
--- a/libgo/go/runtime/debug/stack_test.go
+++ b/libgo/go/runtime/debug/stack_test.go
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package debug
+package debug_test
import (
+ . "runtime/debug"
"strings"
"testing"
)
@@ -22,16 +23,19 @@ func (t T) method() []byte {
The traceback should look something like this, modulo line numbers and hex constants.
Don't worry much about the base levels, but check the ones in our own package.
- /Users/r/go/src/pkg/runtime/debug/stack_test.go:15 (0x13878)
- (*T).ptrmethod: return Stack()
- /Users/r/go/src/pkg/runtime/debug/stack_test.go:18 (0x138dd)
- T.method: return t.ptrmethod()
- /Users/r/go/src/pkg/runtime/debug/stack_test.go:23 (0x13920)
- TestStack: b := T(0).method()
- /Users/r/go/src/pkg/testing/testing.go:132 (0x14a7a)
- tRunner: test.F(t)
- /Users/r/go/src/pkg/runtime/proc.c:145 (0xc970)
- ???: runtime·unlock(&runtime·sched);
+ goroutine 10 [running]:
+ runtime/debug.Stack(0x0, 0x0, 0x0)
+ /Users/r/go/src/runtime/debug/stack.go:28 +0x80
+ runtime/debug.(*T).ptrmethod(0xc82005ee70, 0x0, 0x0, 0x0)
+ /Users/r/go/src/runtime/debug/stack_test.go:15 +0x29
+ runtime/debug.T.method(0x0, 0x0, 0x0, 0x0)
+ /Users/r/go/src/runtime/debug/stack_test.go:18 +0x32
+ runtime/debug.TestStack(0xc8201ce000)
+ /Users/r/go/src/runtime/debug/stack_test.go:37 +0x38
+ testing.tRunner(0xc8201ce000, 0x664b58)
+ /Users/r/go/src/testing/testing.go:456 +0x98
+ created by testing.RunTests
+ /Users/r/go/src/testing/testing.go:561 +0x86d
*/
func TestStack(t *testing.T) {
b := T(0).method()
@@ -41,13 +45,10 @@ func TestStack(t *testing.T) {
}
n := 0
frame := func(line, code string) {
+ check(t, lines[n], code)
+ n++
check(t, lines[n], line)
n++
- // The source might not be available while running the test.
- if strings.HasPrefix(lines[n], "\t") {
- check(t, lines[n], code)
- n++
- }
}
frame("stack_test.go", "\tmethod.N15_runtime_debug.T: return Stack()")
frame("stack_test.go", "\tmethod.N15_runtime_debug.T: return t.ptrmethod()")
diff --git a/libgo/go/runtime/defs_linux_mips64x.go b/libgo/go/runtime/defs_linux_mips64x.go
new file mode 100644
index 0000000..bb3cd98
--- /dev/null
+++ b/libgo/go/runtime/defs_linux_mips64x.go
@@ -0,0 +1,183 @@
+// +build mips64 mips64le
+// +build linux
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EAGAIN = 0xb
+ _ENOMEM = 0xc
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x800
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+ _MADV_HUGEPAGE = 0xe
+ _MADV_NOHUGEPAGE = 0xf
+
+ _SA_RESTART = 0x10000000
+ _SA_ONSTACK = 0x8000000
+ _SA_SIGINFO = 0x8
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGEMT = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGBUS = 0xa
+ _SIGSEGV = 0xb
+ _SIGSYS = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGUSR1 = 0x10
+ _SIGUSR2 = 0x11
+ _SIGCHLD = 0x12
+ _SIGPWR = 0x13
+ _SIGWINCH = 0x14
+ _SIGURG = 0x15
+ _SIGIO = 0x16
+ _SIGSTOP = 0x17
+ _SIGTSTP = 0x18
+ _SIGCONT = 0x19
+ _SIGTTIN = 0x1a
+ _SIGTTOU = 0x1b
+ _SIGVTALRM = 0x1c
+ _SIGPROF = 0x1d
+ _SIGXCPU = 0x1e
+ _SIGXFSZ = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EPOLLIN = 0x1
+ _EPOLLOUT = 0x4
+ _EPOLLERR = 0x8
+ _EPOLLHUP = 0x10
+ _EPOLLRDHUP = 0x2000
+ _EPOLLET = 0x80000000
+ _EPOLL_CLOEXEC = 0x80000
+ _EPOLL_CTL_ADD = 0x1
+ _EPOLL_CTL_DEL = 0x2
+ _EPOLL_CTL_MOD = 0x3
+)
+
+//struct Sigset {
+// uint64 sig[1];
+//};
+//typedef uint64 Sigset;
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+func (ts *timespec) set_sec(x int64) {
+ ts.tv_sec = x
+}
+
+func (ts *timespec) set_nsec(x int32) {
+ ts.tv_nsec = int64(x)
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type sigactiont struct {
+ sa_flags uint32
+ sa_handler uintptr
+ sa_mask [2]uint64
+ // the Linux header does not have an sa_restorer field,
+ // but it is used in setsig(). It does no harm to put it here.
+ sa_restorer uintptr
+}
+
+type siginfo struct {
+ si_signo int32
+ si_code int32
+ si_errno int32
+ __pad0 [1]int32
+ // below here is a union; si_addr is the only field we use
+ si_addr uint64
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type epollevent struct {
+ events uint32
+ pad_cgo_0 [4]byte
+ data [8]byte // unaligned uintptr
+}
+
+const (
+ _O_RDONLY = 0x0
+ _O_CLOEXEC = 0x80000
+ _SA_RESTORER = 0
+)
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_size uintptr
+ ss_flags int32
+}
+
+type sigcontext struct {
+ sc_regs [32]uint64
+ sc_fpregs [32]uint64
+ sc_mdhi uint64
+ sc_hi1 uint64
+ sc_hi2 uint64
+ sc_hi3 uint64
+ sc_mdlo uint64
+ sc_lo1 uint64
+ sc_lo2 uint64
+ sc_lo3 uint64
+ sc_pc uint64
+ sc_fpc_csr uint32
+ sc_used_math uint32
+ sc_dsp uint32
+ sc_reserved uint32
+}
+
+type ucontext struct {
+ uc_flags uint64
+ uc_link *ucontext
+ uc_stack sigaltstackt
+ uc_mcontext sigcontext
+ uc_sigmask uint64
+}
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 8782914..fd328a1 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -6,7 +6,9 @@
package runtime
-import "unsafe"
+import (
+ "unsafe"
+)
//var Fadd64 = fadd64
//var Fsub64 = fsub64
@@ -135,22 +137,22 @@ func setenvs([]string)
var Envs = envs
var SetEnvs = setenvs
-//var BigEndian = _BigEndian
+//var BigEndian = sys.BigEndian
// For benchmarking.
/*
func BenchSetType(n int, x interface{}) {
- e := *(*eface)(unsafe.Pointer(&x))
+ e := *efaceOf(&x)
t := e._type
var size uintptr
var p unsafe.Pointer
switch t.kind & kindMask {
- case _KindPtr:
+ case kindPtr:
t = (*ptrtype)(unsafe.Pointer(t)).elem
size = t.size
p = e.data
- case _KindSlice:
+ case kindSlice:
slice := *(*struct {
ptr unsafe.Pointer
len, cap uintptr
@@ -167,8 +169,15 @@ func BenchSetType(n int, x interface{}) {
})
}
-const PtrSize = ptrSize
+const PtrSize = sys.PtrSize
var TestingAssertE2I2GC = &testingAssertE2I2GC
var TestingAssertE2T2GC = &testingAssertE2T2GC
+
+var ForceGCPeriod = &forcegcperiod
*/
+
+// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
+// the "environment" traceback level, so later calls to
+// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
+func SetTracebackEnv(level string)
diff --git a/libgo/go/runtime/export_windows_test.go b/libgo/go/runtime/export_windows_test.go
index 61fcef9..f712c6f 100644
--- a/libgo/go/runtime/export_windows_test.go
+++ b/libgo/go/runtime/export_windows_test.go
@@ -6,4 +6,12 @@
package runtime
+import "unsafe"
+
var TestingWER = &testingWER
+
+func NumberOfProcessors() int32 {
+ var info systeminfo
+ stdcall1(_GetSystemInfo, uintptr(unsafe.Pointer(&info)))
+ return int32(info.dwnumberofprocessors)
+}
diff --git a/libgo/go/runtime/extern.go b/libgo/go/runtime/extern.go
index 6301d01..eca54a7 100644
--- a/libgo/go/runtime/extern.go
+++ b/libgo/go/runtime/extern.go
@@ -27,6 +27,13 @@ It is a comma-separated list of name=val pairs setting these named variables:
allocfreetrace: setting allocfreetrace=1 causes every allocation to be
profiled and a stack trace printed on each object's allocation and free.
+ cgocheck: setting cgocheck=0 disables all checks for packages
+ using cgo to incorrectly pass Go pointers to non-Go code.
+ Setting cgocheck=1 (the default) enables relatively cheap
+ checks that may miss some errors. Setting cgocheck=2 enables
+ expensive checks that should not miss any errors, but will
+ cause your program to run slower.
+
efence: setting efence=1 causes the allocator to run in a mode
where each object is allocated on a unique page and addresses are
never recycled.
@@ -59,7 +66,7 @@ It is a comma-separated list of name=val pairs setting these named variables:
length of the pause. Setting gctrace=2 emits the same summary but also
repeats each collection. The format of this line is subject to change.
Currently, it is:
- gc # @#s #%: #+...+# ms clock, #+...+# ms cpu, #->#-># MB, # MB goal, # P
+ gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # P
where the fields are as follows:
gc # the GC number, incremented at each GC
@#s time in seconds since program start
@@ -68,9 +75,9 @@ It is a comma-separated list of name=val pairs setting these named variables:
#->#-># MB heap size at GC start, at GC end, and live heap
# MB goal goal heap size
# P number of processors used
- The phases are stop-the-world (STW) sweep termination, scan,
- synchronize Ps, mark, and STW mark termination. The CPU times
- for mark are broken down in to assist time (GC performed in
+ The phases are stop-the-world (STW) sweep termination, concurrent
+ mark and scan, and STW mark termination. The CPU times
+ for mark/scan are broken down into assist time (GC performed in
line with allocation), background GC time, and idle GC time.
If the line ends with "(forced)", this GC was forced by a
runtime.GC() call and all phases are STW.
@@ -96,6 +103,9 @@ It is a comma-separated list of name=val pairs setting these named variables:
schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard
error every X milliseconds, summarizing the scheduler state.
+The net and net/http packages also refer to debugging variables in GODEBUG.
+See the documentation for those packages for details.
+
The GOMAXPROCS variable limits the number of operating system threads that
can execute user-level Go code simultaneously. There is no limit to the number of threads
that can be blocked in system calls on behalf of Go code; those do not count against
@@ -104,15 +114,24 @@ the limit.
The GOTRACEBACK variable controls the amount of output generated when a Go
program fails due to an unrecovered panic or an unexpected runtime condition.
-By default, a failure prints a stack trace for every extant goroutine, eliding functions
-internal to the run-time system, and then exits with exit code 2.
-If GOTRACEBACK=0, the per-goroutine stack traces are omitted entirely.
-If GOTRACEBACK=1, the default behavior is used.
-If GOTRACEBACK=2, the per-goroutine stack traces include run-time functions.
-If GOTRACEBACK=crash, the per-goroutine stack traces include run-time functions,
-and if possible the program crashes in an operating-specific manner instead of
-exiting. For example, on Unix systems, the program raises SIGABRT to trigger a
-core dump.
+By default, a failure prints a stack trace for the current goroutine,
+eliding functions internal to the run-time system, and then exits with exit code 2.
+The failure prints stack traces for all goroutines if there is no current goroutine
+or the failure is internal to the run-time.
+GOTRACEBACK=none omits the goroutine stack traces entirely.
+GOTRACEBACK=single (the default) behaves as described above.
+GOTRACEBACK=all adds stack traces for all user-created goroutines.
+GOTRACEBACK=system is like ``all'' but adds stack frames for run-time functions
+and shows goroutines created internally by the run-time.
+GOTRACEBACK=crash is like ``system'' but crashes in an operating system-specific
+manner instead of exiting. For example, on Unix systems, the crash raises
+SIGABRT to trigger a core dump.
+For historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for
+none, all, and system, respectively.
+The runtime/debug package's SetTraceback function allows increasing the
+amount of output at run time, but it cannot reduce the amount below that
+specified by the environment variable.
+See https://golang.org/pkg/runtime/debug/#SetTraceback.
The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete
the set of Go environment variables. They influence the building of Go programs
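
A quick way to see the gctrace line format documented above is to force a few collections with the variable set (a sketch; the lines go to standard error):

	// Run with: GODEBUG=gctrace=1 go run main.go
	package main

	import "runtime"

	func main() {
		for i := 0; i < 3; i++ {
			_ = make([]byte, 10<<20)
			runtime.GC() // each forced cycle prints one "gc # ... (forced)" line
		}
	}
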
diff --git a/libgo/go/runtime/fastlog2.go b/libgo/go/runtime/fastlog2.go
new file mode 100644
index 0000000..b22e825
--- /dev/null
+++ b/libgo/go/runtime/fastlog2.go
@@ -0,0 +1,33 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// fastlog2 implements a fast approximation to the base 2 log of a
+// float64. This is used to compute a geometric distribution for heap
+// sampling, without introducing dependences into package math. This
+// uses a very rough approximation using the float64 exponent and the
+// first 25 bits of the mantissa. The top 5 bits of the mantissa are
+// used to load limits from a table of constants and the rest are used
+// to scale linearly between them.
+func fastlog2(x float64) float64 {
+ const fastlogScaleBits = 20
+ const fastlogScaleRatio = 1.0 / (1 << fastlogScaleBits)
+
+ xBits := float64bits(x)
+ // Extract the exponent from the IEEE float64, and index a constant
+ // table with the top fastlogNumBits bits of the mantissa.
+ xExp := int64((xBits>>52)&0x7FF) - 1023
+ xManIndex := (xBits >> (52 - fastlogNumBits)) % (1 << fastlogNumBits)
+ xManScale := (xBits >> (52 - fastlogNumBits - fastlogScaleBits)) % (1 << fastlogScaleBits)
+
+ low, high := fastlog2Table[xManIndex], fastlog2Table[xManIndex+1]
+ return float64(xExp) + low + (high-low)*float64(xManScale)*fastlogScaleRatio
+}
+
+// float64bits returns the IEEE 754 binary representation of f.
+// Taken from math.Float64bits to avoid dependences into package math.
+func float64bits(f float64) uint64 { return *(*uint64)(unsafe.Pointer(&f)) }
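
The same idea in standalone form, checked against math.Log2: take the exponent directly, index a table by the top mantissa bits, and interpolate linearly between adjacent entries. The endpoints here are computed with math.Log2 instead of the generated fastlog2Table, so the constants are illustrative only:

	package main

	import (
		"fmt"
		"math"
	)

	func approxLog2(x float64) float64 {
		const numBits = 5    // table index bits (fastlogNumBits above)
		const scaleBits = 20 // interpolation bits (fastlogScaleBits above)
		bits := math.Float64bits(x)
		exp := int64((bits>>52)&0x7FF) - 1023
		idx := (bits >> (52 - numBits)) & (1<<numBits - 1)
		frac := float64((bits>>(52-numBits-scaleBits))&(1<<scaleBits-1)) / (1 << scaleBits)
		lo := math.Log2(1 + float64(idx)/(1<<numBits))
		hi := math.Log2(1 + float64(idx+1)/(1<<numBits))
		return float64(exp) + lo + (hi-lo)*frac
	}

	func main() {
		for _, x := range []float64{1.5, 10, 1e6} {
			fmt.Printf("x=%g approx=%.6f exact=%.6f\n", x, approxLog2(x), math.Log2(x))
		}
	}
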
diff --git a/libgo/go/runtime/fastlog2_test.go b/libgo/go/runtime/fastlog2_test.go
new file mode 100644
index 0000000..8f92dc6
--- /dev/null
+++ b/libgo/go/runtime/fastlog2_test.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+package runtime_test
+
+import (
+ "math"
+ "runtime"
+ "testing"
+)
+
+func TestFastLog2(t *testing.T) {
+ // Compute the Euclidean distance between math.Log2 and the FastLog2
+ // implementation over the range of interest for heap sampling.
+ const randomBitCount = 26
+ var e float64
+
+ inc := 1
+ if testing.Short() {
+ // Check 1K total values, down from 64M.
+ inc = 1 << 16
+ }
+ for i := 1; i < 1<<randomBitCount; i += inc {
+ l, fl := math.Log2(float64(i)), runtime.Fastlog2(float64(i))
+ d := l - fl
+ e += d * d
+ }
+ e = math.Sqrt(e)
+
+ if e > 1.0 {
+ t.Fatalf("imprecision on fastlog2 implementation, want <=1.0, got %f", e)
+ }
+}
diff --git a/libgo/go/runtime/fastlog2table.go b/libgo/go/runtime/fastlog2table.go
new file mode 100644
index 0000000..c36d583
--- /dev/null
+++ b/libgo/go/runtime/fastlog2table.go
@@ -0,0 +1,43 @@
+// AUTO-GENERATED by mkfastlog2table.go
+// Run go generate from src/runtime to update.
+// See mkfastlog2table.go for comments.
+
+package runtime
+
+const fastlogNumBits = 5
+
+var fastlog2Table = [1<<fastlogNumBits + 1]float64{
+ 0,
+ 0.0443941193584535,
+ 0.08746284125033943,
+ 0.12928301694496647,
+ 0.16992500144231248,
+ 0.2094533656289499,
+ 0.24792751344358555,
+ 0.28540221886224837,
+ 0.3219280948873623,
+ 0.3575520046180837,
+ 0.39231742277876036,
+ 0.4262647547020979,
+ 0.4594316186372973,
+ 0.4918530963296748,
+ 0.5235619560570128,
+ 0.5545888516776374,
+ 0.5849625007211563,
+ 0.6147098441152082,
+ 0.6438561897747247,
+ 0.6724253419714956,
+ 0.7004397181410922,
+ 0.7279204545631992,
+ 0.7548875021634686,
+ 0.7813597135246596,
+ 0.8073549220576042,
+ 0.8328900141647417,
+ 0.8579809951275721,
+ 0.8826430493618412,
+ 0.9068905956085185,
+ 0.9307373375628862,
+ 0.9541963103868752,
+ 0.9772799234999164,
+ 1,
+}
diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go
index 2a95cc7..71d4656 100644
--- a/libgo/go/runtime/gc_test.go
+++ b/libgo/go/runtime/gc_test.go
@@ -18,59 +18,13 @@ func TestGcSys(t *testing.T) {
if os.Getenv("GOGC") == "off" {
t.Skip("skipping test; GOGC=off in environment")
}
- data := struct{ Short bool }{testing.Short()}
- got := executeTest(t, testGCSysSource, &data)
+ got := runTestProg(t, "testprog", "GCSys")
want := "OK\n"
if got != want {
t.Fatalf("expected %q, but got %q", want, got)
}
}
-const testGCSysSource = `
-package main
-
-import (
- "fmt"
- "runtime"
-)
-
-func main() {
- runtime.GOMAXPROCS(1)
- memstats := new(runtime.MemStats)
- runtime.GC()
- runtime.ReadMemStats(memstats)
- sys := memstats.Sys
-
- runtime.MemProfileRate = 0 // disable profiler
-
- itercount := 1000000
-{{if .Short}}
- itercount = 100000
-{{end}}
- for i := 0; i < itercount; i++ {
- workthegc()
- }
-
- // Should only be using a few MB.
- // We allocated 100 MB or (if not short) 1 GB.
- runtime.ReadMemStats(memstats)
- if sys > memstats.Sys {
- sys = 0
- } else {
- sys = memstats.Sys - sys
- }
- if sys > 16<<20 {
- fmt.Printf("using too much memory: %d bytes\n", sys)
- return
- }
- fmt.Printf("OK\n")
-}
-
-func workthegc() []byte {
- return make([]byte, 1029)
-}
-`
-
func TestGcDeepNesting(t *testing.T) {
type T [2][2][2][2][2][2][2][2][2][2]*int
a := new(T)
@@ -198,6 +152,39 @@ func TestHugeGCInfo(t *testing.T) {
}
}
+/*
+func TestPeriodicGC(t *testing.T) {
+ // Make sure we're not in the middle of a GC.
+ runtime.GC()
+
+ var ms1, ms2 runtime.MemStats
+ runtime.ReadMemStats(&ms1)
+
+ // Make periodic GC run continuously.
+ orig := *runtime.ForceGCPeriod
+ *runtime.ForceGCPeriod = 0
+
+ // Let some periodic GCs happen. In a heavily loaded system,
+ // it's possible these will be delayed, so this is designed to
+ // succeed quickly if things are working, but to give it some
+ // slack if things are slow.
+ var numGCs uint32
+ const want = 2
+ for i := 0; i < 20 && numGCs < want; i++ {
+ time.Sleep(5 * time.Millisecond)
+
+ // Test that periodic GC actually happened.
+ runtime.ReadMemStats(&ms2)
+ numGCs = ms2.NumGC - ms1.NumGC
+ }
+ *runtime.ForceGCPeriod = orig
+
+ if numGCs < want {
+ t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
+ }
+}
+*/
+
func BenchmarkSetTypePtr(b *testing.B) {
benchSetType(b, new(*byte))
}
@@ -481,10 +468,12 @@ func TestAssertE2T2Liveness(t *testing.T) {
testIfaceEqual(io.EOF)
}
+var a bool
+
+//go:noinline
func testIfaceEqual(x interface{}) {
if x == "abc" {
- // Prevent inlining
- panic("")
+ a = true
}
}
diff --git a/libgo/go/runtime/gcinfo_test.go b/libgo/go/runtime/gcinfo_test.go
index 7e345e5..d3262a6 100644
--- a/libgo/go/runtime/gcinfo_test.go
+++ b/libgo/go/runtime/gcinfo_test.go
@@ -130,7 +130,7 @@ func infoBigStruct() []byte {
typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
- case "arm64", "amd64", "ppc64", "ppc64le":
+ case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le":
return []byte{
typePointer, // q *int
typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
diff --git a/libgo/go/runtime/lfstack_linux_mips64x.go b/libgo/go/runtime/lfstack_linux_mips64x.go
new file mode 100644
index 0000000..49b6558
--- /dev/null
+++ b/libgo/go/runtime/lfstack_linux_mips64x.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips64 mips64le
+// +build linux
+
+package runtime
+
+import "unsafe"
+
+// On mips64, Linux limits the user address space to 40 bits (see
+// TASK_SIZE64 in the Linux kernel). This has grown over time,
+// so here we allow 48-bit addresses.
+//
+// In addition to the 16 bits taken from the top, we can take 3 from the
+// bottom, because node must be pointer-aligned, giving a total of 19 bits
+// of count.
+const (
+ addrBits = 48
+ cntBits = 64 - addrBits + 3
+)
+
+func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+ return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
+}
+
+func lfstackUnpack(val uint64) (node *lfnode, cnt uintptr) {
+ node = (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
+ cnt = uintptr(val & (1<<cntBits - 1))
+ return
+}
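The packing arithmetic above can be exercised with plain integers. In this
sketch the node address is a made-up, 8-byte-aligned value below 2^48; its
three low zero bits are exactly what keep it from colliding with the 19-bit
counter:

    package main

    import "fmt"

    const (
    	addrBits = 48
    	cntBits  = 64 - addrBits + 3 // 19 bits of counter
    )

    func pack(addr, cnt uint64) uint64 {
    	return addr<<(64-addrBits) | cnt&(1<<cntBits-1)
    }

    func unpack(val uint64) (addr, cnt uint64) {
    	// Shifting right by cntBits and back left by 3 restores the
    	// pointer-aligned address and drops the counter bits.
    	return val >> cntBits << 3, val & (1<<cntBits - 1)
    }

    func main() {
    	addr := uint64(0xc4200010f8) // hypothetical aligned node address
    	val := pack(addr, 123456)
    	a, c := unpack(val)
    	fmt.Printf("addr=%#x cnt=%d\n", a, c) // addr=0xc4200010f8 cnt=123456
    }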
diff --git a/libgo/go/runtime/malloc_test.go b/libgo/go/runtime/malloc_test.go
index df6a0e5..4f926276 100644
--- a/libgo/go/runtime/malloc_test.go
+++ b/libgo/go/runtime/malloc_test.go
@@ -18,13 +18,14 @@ func TestMemStats(t *testing.T) {
st := new(MemStats)
ReadMemStats(st)
- // Everything except HeapReleased and HeapIdle, because they indeed can be 0.
+ // Everything except HeapReleased, HeapIdle, and NumGC,
+ // because they indeed can be 0.
if st.Alloc == 0 || st.TotalAlloc == 0 || st.Sys == 0 || st.Lookups == 0 ||
st.Mallocs == 0 || st.Frees == 0 || st.HeapAlloc == 0 || st.HeapSys == 0 ||
st.HeapInuse == 0 || st.HeapObjects == 0 || st.StackInuse == 0 ||
st.StackSys == 0 || st.MSpanInuse == 0 || st.MSpanSys == 0 || st.MCacheInuse == 0 ||
st.MCacheSys == 0 || st.BuckHashSys == 0 || st.GCSys == 0 || st.OtherSys == 0 ||
- st.NextGC == 0 || st.NumGC == 0 {
+ st.NextGC == 0 {
t.Fatalf("Zero value: %+v", *st)
}
@@ -58,6 +59,14 @@ func TestMemStats(t *testing.T) {
if st.PauseTotalNs != pauseTotal {
t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
}
+ for i := int(st.NumGC); i < len(st.PauseNs); i++ {
+ if st.PauseNs[i] != 0 {
+ t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
+ }
+ if st.PauseEnd[i] != 0 {
+ t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
+ }
+ }
} else {
if st.PauseTotalNs < pauseTotal {
t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
@@ -83,6 +92,23 @@ func TestStringConcatenationAllocs(t *testing.T) {
}
}
+func TestTinyAlloc(t *testing.T) {
+ const N = 16
+ var v [N]unsafe.Pointer
+ for i := range v {
+ v[i] = unsafe.Pointer(new(byte))
+ }
+
+ chunks := make(map[uintptr]bool, N)
+ for _, p := range v {
+ chunks[uintptr(p)&^7] = true
+ }
+
+ if len(chunks) == N {
+ t.Fatal("no bytes allocated within the same 8-byte chunk")
+ }
+}
+
var mallocSink uintptr
func BenchmarkMalloc8(b *testing.B) {
diff --git a/libgo/go/runtime/mkfastlog2table.go b/libgo/go/runtime/mkfastlog2table.go
new file mode 100644
index 0000000..587ebf4
--- /dev/null
+++ b/libgo/go/runtime/mkfastlog2table.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// fastlog2Table contains log2 approximations for 5 binary digits.
+// This is used to implement fastlog2, which is used for heap sampling.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math"
+)
+
+func main() {
+ var buf bytes.Buffer
+
+ fmt.Fprintln(&buf, "// AUTO-GENERATED by mkfastlog2table.go")
+ fmt.Fprintln(&buf, "// Run go generate from src/runtime to update.")
+ fmt.Fprintln(&buf, "// See mkfastlog2table.go for comments.")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "package runtime")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "const fastlogNumBits =", fastlogNumBits)
+ fmt.Fprintln(&buf)
+
+ fmt.Fprintln(&buf, "var fastlog2Table = [1<<fastlogNumBits + 1]float64{")
+ table := computeTable()
+ for _, t := range table {
+ fmt.Fprintf(&buf, "\t%v,\n", t)
+ }
+ fmt.Fprintln(&buf, "}")
+
+ if err := ioutil.WriteFile("fastlog2table.go", buf.Bytes(), 0644); err != nil {
+ log.Fatalln(err)
+ }
+}
+
+const fastlogNumBits = 5
+
+func computeTable() []float64 {
+ fastlog2Table := make([]float64, 1<<fastlogNumBits+1)
+ for i := 0; i <= (1 << fastlogNumBits); i++ {
+ fastlog2Table[i] = math.Log2(1.0 + float64(i)/(1<<fastlogNumBits))
+ }
+ return fastlog2Table
+}
diff --git a/libgo/go/runtime/mmap.go b/libgo/go/runtime/mmap.go
new file mode 100644
index 0000000..a076842
--- /dev/null
+++ b/libgo/go/runtime/mmap.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+// +build !solaris
+// +build !windows
+// +build !nacl
+// +build !linux !amd64
+
+package runtime
+
+import "unsafe"
+
+// mmap calls the mmap system call. It is implemented in assembly.
+func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
diff --git a/libgo/go/runtime/msan.go b/libgo/go/runtime/msan.go
new file mode 100644
index 0000000..4dbdf05
--- /dev/null
+++ b/libgo/go/runtime/msan.go
@@ -0,0 +1,55 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build msan
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+// Public memory sanitizer API.
+
+func MSanRead(addr unsafe.Pointer, len int) {
+ msanread(addr, uintptr(len))
+}
+
+func MSanWrite(addr unsafe.Pointer, len int) {
+ msanwrite(addr, uintptr(len))
+}
+
+// Private interface for the runtime.
+const msanenabled = true
+
+// If we are running on the system stack, the C program may have
+// marked part of that stack as uninitialized. We don't instrument
+// the runtime, but operations like a slice copy can call msanread
+// anyhow for values on the stack. Just ignore msanread when running
+// on the system stack. The other msan functions are fine.
+func msanread(addr unsafe.Pointer, sz uintptr) {
+ g := getg()
+ if g == g.m.g0 || g == g.m.gsignal {
+ return
+ }
+ domsanread(addr, sz)
+}
+
+//go:noescape
+func domsanread(addr unsafe.Pointer, sz uintptr)
+
+//go:noescape
+func msanwrite(addr unsafe.Pointer, sz uintptr)
+
+//go:noescape
+func msanmalloc(addr unsafe.Pointer, sz uintptr)
+
+//go:noescape
+func msanfree(addr unsafe.Pointer, sz uintptr)
+
+// These are called from msan_amd64.s
+//go:cgo_import_static __msan_read_go
+//go:cgo_import_static __msan_write_go
+//go:cgo_import_static __msan_malloc_go
+//go:cgo_import_static __msan_free_go
diff --git a/libgo/go/runtime/msan/msan.go b/libgo/go/runtime/msan/msan.go
new file mode 100644
index 0000000..b6ea3f0
--- /dev/null
+++ b/libgo/go/runtime/msan/msan.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build msan,linux,amd64
+
+package msan
+
+/*
+#cgo CFLAGS: -fsanitize=memory
+#cgo LDFLAGS: -fsanitize=memory
+
+#include <stdint.h>
+#include <sanitizer/msan_interface.h>
+
+void __msan_read_go(void *addr, uintptr_t sz) {
+ __msan_check_mem_is_initialized(addr, sz);
+}
+
+void __msan_write_go(void *addr, uintptr_t sz) {
+ __msan_unpoison(addr, sz);
+}
+
+void __msan_malloc_go(void *addr, uintptr_t sz) {
+ __msan_unpoison(addr, sz);
+}
+
+void __msan_free_go(void *addr, uintptr_t sz) {
+ __msan_poison(addr, sz);
+}
+*/
+import "C"
diff --git a/libgo/go/runtime/msan0.go b/libgo/go/runtime/msan0.go
new file mode 100644
index 0000000..e206720
--- /dev/null
+++ b/libgo/go/runtime/msan0.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !msan
+
+// Dummy MSan support API, used when not built with -msan.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+const msanenabled = false
+
+// Because msanenabled is false, none of these functions should be called.
+
+func msanread(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanwrite(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanmalloc(addr unsafe.Pointer, sz uintptr) { throw("msan") }
+func msanfree(addr unsafe.Pointer, sz uintptr) { throw("msan") }
diff --git a/libgo/go/runtime/mstkbar.go b/libgo/go/runtime/mstkbar.go
new file mode 100644
index 0000000..016625a
--- /dev/null
+++ b/libgo/go/runtime/mstkbar.go
@@ -0,0 +1,365 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector: stack barriers
+//
+// Stack barriers enable the garbage collector to determine how much
+// of a goroutine stack has changed between when a stack is scanned
+// during the concurrent scan phase and when it is re-scanned during
+// the stop-the-world mark termination phase. Mark termination only
+// needs to re-scan the changed part, so for deep stacks this can
+// significantly reduce GC pause time compared to the alternative of
+// re-scanning whole stacks. The deeper the stacks, the more stack
+// barriers help.
+//
+// When stacks are scanned during the concurrent scan phase, the stack
+// scan installs stack barriers by selecting stack frames and
+// overwriting the saved return PCs (or link registers) of these
+// frames with the PC of a "stack barrier trampoline". Later, when a
+// selected frame returns, it "returns" to this trampoline instead of
+// returning to its actual caller. The trampoline records that the
+// stack has unwound past this frame and jumps to the original return
+// PC recorded when the stack barrier was installed. Mark termination
+// re-scans only as far as the first frame that hasn't hit a stack
+// barrier and then removes any un-hit stack barriers.
+//
+// This scheme is very lightweight. No special code is required in the
+// mutator to record stack unwinding and the trampoline is only a few
+// assembly instructions.
+//
+// Book-keeping
+// ------------
+//
+// The primary cost of stack barriers is book-keeping: the runtime has
+// to record the locations of all stack barriers and the original
+// return PCs in order to return to the correct caller when a stack
+// barrier is hit and so it can remove un-hit stack barriers. In order
+// to minimize this cost, the Go runtime places stack barriers in
+// exponentially-spaced frames, starting 1K past the current frame.
+// The book-keeping structure hence grows logarithmically with the
+// size of the stack and mark termination re-scans at most twice as
+// much stack as necessary.
+//
+// The runtime reserves space for this book-keeping structure at the
+// top of the stack allocation itself (just above the outermost
+// frame). This is necessary because the regular memory allocator can
+// itself grow the stack, and hence can't be used when allocating
+// stack-related structures.
+//
+// For debugging, the runtime also supports installing stack barriers
+// at every frame. However, this requires significantly more
+// book-keeping space.
+//
+// Correctness
+// -----------
+//
+// The runtime and the compiler cooperate to ensure that all objects
+// reachable from the stack as of mark termination are marked.
+// Anything unchanged since the concurrent scan phase will be marked
+// because it is marked by the concurrent scan. After the concurrent
+// scan, there are three possible classes of stack modifications that
+// must be tracked:
+//
+// 1) Mutator writes below the lowest un-hit stack barrier. This
+// includes all writes performed by an executing function to its own
+// stack frame. This part of the stack will be re-scanned by mark
+// termination, which will mark any objects made reachable from
+// modifications to this part of the stack.
+//
+// 2) Mutator writes above the lowest un-hit stack barrier. It's
+// possible for a mutator to modify the stack above the lowest un-hit
+// stack barrier if a higher frame has passed down a pointer to a
+// stack variable in its frame. This is called an "up-pointer". The
+// compiler ensures that writes through up-pointers have an
+// accompanying write barrier (it simply doesn't distinguish between
+// writes through up-pointers and writes through heap pointers). This
+// write barrier marks any object made reachable from modifications to
+// this part of the stack.
+//
+// 3) Runtime writes to the stack. Various runtime operations such as
+// sends to unbuffered channels can write to arbitrary parts of the
+// stack, including above the lowest un-hit stack barrier. We solve
+// this in two ways. In many cases, the runtime can perform an
+// explicit write barrier operation like in case 2. However, in the
+// case of bulk memory move (typedmemmove), the runtime doesn't
+// necessarily have ready access to a pointer bitmap for the memory
+// being copied, so it simply unwinds any stack barriers below the
+// destination.
+//
+// Gotchas
+// -------
+//
+// Anything that inspects or manipulates the stack potentially needs
+// to understand stack barriers. The most obvious case is that
+// gentraceback needs to use the original return PC when it encounters
+// the stack barrier trampoline. Anything that unwinds the stack such
+// as panic/recover must unwind stack barriers in tandem with
+// unwinding the stack.
+//
+// Stack barriers require that any goroutine whose stack has been
+// scanned must execute write barriers. Go solves this by simply
+// enabling write barriers globally during the concurrent scan phase.
+// However, traditionally, write barriers are not enabled during this
+// phase.
+//
+// Synchronization
+// ---------------
+//
+// For the most part, accessing and modifying stack barriers is
+// synchronized around GC safe points. Installing stack barriers
+// forces the G to a safe point, while all other operations that
+// modify stack barriers run on the G and prevent it from reaching a
+// safe point.
+//
+// Subtlety arises when a G may be tracebacked when *not* at a safe
+// point. This happens during sigprof. For this, each G has a "stack
+// barrier lock" (see gcLockStackBarriers, gcUnlockStackBarriers).
+// Operations that manipulate stack barriers acquire this lock, while
+// sigprof tries to acquire it and simply skips the traceback if it
+// can't acquire it. There is one exception for performance and
+// complexity reasons: hitting a stack barrier manipulates the stack
+// barrier list without acquiring the stack barrier lock. For this,
+// gentraceback performs a special fix up if the traceback starts in
+// the stack barrier function.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+const debugStackBarrier = false
+
+// firstStackBarrierOffset is the approximate byte offset at
+// which to place the first stack barrier from the current SP.
+// This is a lower bound on how much stack will have to be
+// re-scanned during mark termination. Subsequent barriers are
+// placed at firstStackBarrierOffset * 2^n offsets.
+//
+// For debugging, this can be set to 0, which will install a
+// stack barrier at every frame. If you do this, you may also
+// have to raise _StackMin, since the stack barrier
+// bookkeeping will use a large amount of each stack.
+var firstStackBarrierOffset = 1024
+
+// gcMaxStackBarriers returns the maximum number of stack barriers
+// that can be installed in a stack of stackSize bytes.
+func gcMaxStackBarriers(stackSize int) (n int) {
+ if firstStackBarrierOffset == 0 {
+ // Special debugging case for inserting stack barriers
+ // at every frame. Steal half of the stack for the
+ // []stkbar. Technically, if the stack were to consist
+ // solely of return PCs we would need two thirds of
+ // the stack, but stealing that much breaks things and
+ // this doesn't happen in practice.
+ return stackSize / 2 / int(unsafe.Sizeof(stkbar{}))
+ }
+
+ offset := firstStackBarrierOffset
+ for offset < stackSize {
+ n++
+ offset *= 2
+ }
+ return n + 1
+}
+
+// gcInstallStackBarrier installs a stack barrier over the return PC of frame.
+//go:nowritebarrier
+func gcInstallStackBarrier(gp *g, frame *stkframe) bool {
+ if frame.lr == 0 {
+ if debugStackBarrier {
+ print("not installing stack barrier with no LR, goid=", gp.goid, "\n")
+ }
+ return false
+ }
+
+ if frame.fn.entry == cgocallback_gofuncPC {
+ // cgocallback_gofunc doesn't return to its LR;
+ // instead, its return path puts LR in g.sched.pc and
+ // switches back to the system stack on which
+ // cgocallback_gofunc was originally called. We can't
+ // have a stack barrier in g.sched.pc, so don't
+ // install one in this frame.
+ if debugStackBarrier {
+ print("not installing stack barrier over LR of cgocallback_gofunc, goid=", gp.goid, "\n")
+ }
+ return false
+ }
+
+ // Save the return PC and overwrite it with stackBarrier.
+ var lrUintptr uintptr
+ if usesLR {
+ lrUintptr = frame.sp
+ } else {
+ lrUintptr = frame.fp - sys.RegSize
+ }
+ lrPtr := (*sys.Uintreg)(unsafe.Pointer(lrUintptr))
+ if debugStackBarrier {
+ print("install stack barrier at ", hex(lrUintptr), " over ", hex(*lrPtr), ", goid=", gp.goid, "\n")
+ if uintptr(*lrPtr) != frame.lr {
+ print("frame.lr=", hex(frame.lr))
+ throw("frame.lr differs from stack LR")
+ }
+ }
+
+ gp.stkbar = gp.stkbar[:len(gp.stkbar)+1]
+ stkbar := &gp.stkbar[len(gp.stkbar)-1]
+ stkbar.savedLRPtr = lrUintptr
+ stkbar.savedLRVal = uintptr(*lrPtr)
+ *lrPtr = sys.Uintreg(stackBarrierPC)
+ return true
+}
+
+// gcRemoveStackBarriers removes all stack barriers installed in gp's stack.
+//go:nowritebarrier
+func gcRemoveStackBarriers(gp *g) {
+ if debugStackBarrier && gp.stkbarPos != 0 {
+ print("hit ", gp.stkbarPos, " stack barriers, goid=", gp.goid, "\n")
+ }
+
+ gcLockStackBarriers(gp)
+
+ // Remove stack barriers that we didn't hit.
+ for _, stkbar := range gp.stkbar[gp.stkbarPos:] {
+ gcRemoveStackBarrier(gp, stkbar)
+ }
+
+ // Clear recorded stack barriers so copystack doesn't try to
+ // adjust them.
+ gp.stkbarPos = 0
+ gp.stkbar = gp.stkbar[:0]
+
+ gcUnlockStackBarriers(gp)
+}
+
+// gcRemoveStackBarrier removes a single stack barrier. It is the
+// inverse operation of gcInstallStackBarrier.
+//
+// This is nosplit to ensure gp's stack does not move.
+//
+//go:nowritebarrier
+//go:nosplit
+func gcRemoveStackBarrier(gp *g, stkbar stkbar) {
+ if debugStackBarrier {
+ print("remove stack barrier at ", hex(stkbar.savedLRPtr), " with ", hex(stkbar.savedLRVal), ", goid=", gp.goid, "\n")
+ }
+ lrPtr := (*sys.Uintreg)(unsafe.Pointer(stkbar.savedLRPtr))
+ if val := *lrPtr; val != sys.Uintreg(stackBarrierPC) {
+ printlock()
+ print("at *", hex(stkbar.savedLRPtr), " expected stack barrier PC ", hex(stackBarrierPC), ", found ", hex(val), ", goid=", gp.goid, "\n")
+ print("gp.stkbar=")
+ gcPrintStkbars(gp, -1)
+ print(", gp.stack=[", hex(gp.stack.lo), ",", hex(gp.stack.hi), ")\n")
+ throw("stack barrier lost")
+ }
+ *lrPtr = sys.Uintreg(stkbar.savedLRVal)
+}
+
+// gcPrintStkbars prints the stack barriers of gp for debugging. It
+// places a "@@@" marker at gp.stkbarPos. If marker >= 0, it will also
+// place a "==>" marker before the marker'th entry.
+func gcPrintStkbars(gp *g, marker int) {
+ print("[")
+ for i, s := range gp.stkbar {
+ if i > 0 {
+ print(" ")
+ }
+ if i == int(gp.stkbarPos) {
+ print("@@@ ")
+ }
+ if i == marker {
+ print("==> ")
+ }
+ print("*", hex(s.savedLRPtr), "=", hex(s.savedLRVal))
+ }
+ if int(gp.stkbarPos) == len(gp.stkbar) {
+ print(" @@@")
+ }
+ if marker == len(gp.stkbar) {
+ print(" ==>")
+ }
+ print("]")
+}
+
+// gcUnwindBarriers marks all stack barriers up to the frame containing
+// sp as hit and removes them. This is used during stack unwinding for
+// panic/recover and by heapBitsBulkBarrier to force stack re-scanning
+// when its destination is on the stack.
+//
+// This is nosplit to ensure gp's stack does not move.
+//
+//go:nosplit
+func gcUnwindBarriers(gp *g, sp uintptr) {
+ gcLockStackBarriers(gp)
+ // On LR machines, if there is a stack barrier on the return
+ // from the frame containing sp, this will mark it as hit even
+ // though it isn't, but it's okay to be conservative.
+ before := gp.stkbarPos
+ for int(gp.stkbarPos) < len(gp.stkbar) && gp.stkbar[gp.stkbarPos].savedLRPtr < sp {
+ gcRemoveStackBarrier(gp, gp.stkbar[gp.stkbarPos])
+ gp.stkbarPos++
+ }
+ gcUnlockStackBarriers(gp)
+ if debugStackBarrier && gp.stkbarPos != before {
+ print("skip barriers below ", hex(sp), " in goid=", gp.goid, ": ")
+ // We skipped barriers between the "==>" marker
+ // (before) and the "@@@" marker (gp.stkbarPos).
+ gcPrintStkbars(gp, int(before))
+ print("\n")
+ }
+}
+
+// nextBarrierPC returns the original return PC of the next stack barrier.
+// Used by getcallerpc, so it must be nosplit.
+//go:nosplit
+func nextBarrierPC() uintptr {
+ gp := getg()
+ return gp.stkbar[gp.stkbarPos].savedLRVal
+}
+
+// setNextBarrierPC sets the return PC of the next stack barrier.
+// Used by setcallerpc, so it must be nosplit.
+//go:nosplit
+func setNextBarrierPC(pc uintptr) {
+ gp := getg()
+ gcLockStackBarriers(gp)
+ gp.stkbar[gp.stkbarPos].savedLRVal = pc
+ gcUnlockStackBarriers(gp)
+}
+
+// gcLockStackBarriers synchronizes with tracebacks of gp's stack
+// during sigprof for installation or removal of stack barriers. It
+// blocks until any current sigprof is done tracebacking gp's stack
+// and then disallows profiling tracebacks of gp's stack.
+//
+// This is necessary because a sigprof during barrier installation or
+// removal could observe inconsistencies between the stkbar array and
+// the stack itself and crash.
+//
+//go:nosplit
+func gcLockStackBarriers(gp *g) {
+ // Disable preemption so scanstack cannot run while the caller
+ // is manipulating the stack barriers.
+ acquirem()
+ for !atomic.Cas(&gp.stackLock, 0, 1) {
+ osyield()
+ }
+}
+
+//go:nosplit
+func gcTryLockStackBarriers(gp *g) bool {
+ mp := acquirem()
+ result := atomic.Cas(&gp.stackLock, 0, 1)
+ if !result {
+ releasem(mp)
+ }
+ return result
+}
+
+func gcUnlockStackBarriers(gp *g) {
+ atomic.Store(&gp.stackLock, 0)
+ releasem(getg().m)
+}
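The exponential placement described at the top of this file is easy to see
numerically. This standalone sketch mirrors the loop in gcMaxStackBarriers
for a few stack sizes:

    package main

    import "fmt"

    // Barriers go at 1K, 2K, 4K, ... past the current SP, so the
    // bookkeeping grows with log2 of the stack size, not its depth.
    func maxBarriers(stackSize int) int {
    	const firstOffset = 1024
    	n := 0
    	for off := firstOffset; off < stackSize; off *= 2 {
    		n++
    	}
    	return n + 1
    }

    func main() {
    	for _, sz := range []int{8 << 10, 128 << 10, 1 << 20, 1 << 30} {
    		fmt.Printf("%10d-byte stack -> at most %2d barriers\n", sz, maxBarriers(sz))
    	}
    }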
diff --git a/libgo/go/runtime/os1_linux_generic.go b/libgo/go/runtime/os1_linux_generic.go
new file mode 100644
index 0000000..2c8b743
--- /dev/null
+++ b/libgo/go/runtime/os1_linux_generic.go
@@ -0,0 +1,27 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !mips64
+// +build !mips64le
+// +build linux
+
+package runtime
+
+var sigset_all = sigset{^uint32(0), ^uint32(0)}
+
+func sigaddset(mask *sigset, i int) {
+ (*mask)[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
+}
+
+func sigdelset(mask *sigset, i int) {
+ (*mask)[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
+}
+
+func sigfillset(mask *uint64) {
+ *mask = ^uint64(0)
+}
+
+func sigcopyset(mask *sigset, m sigmask) {
+ copy((*mask)[:], m[:])
+}
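The mask arithmetic is self-contained enough to demo outside the runtime.
This sketch reuses the same [2]uint32 layout; the signal numbers follow the
usual Linux numbering (SIGINT is 2, the first real-time signal is 34):

    package main

    import "fmt"

    type sigset [2]uint32

    func sigaddset(mask *sigset, i int) {
    	(*mask)[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
    }

    func sigdelset(mask *sigset, i int) {
    	(*mask)[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
    }

    func main() {
    	var m sigset
    	sigaddset(&m, 2)  // SIGINT: bit 1 of word 0
    	sigaddset(&m, 34) // signal 34: bit 1 of word 1
    	fmt.Printf("%#x %#x\n", m[0], m[1]) // 0x2 0x2
    	sigdelset(&m, 2)
    	fmt.Printf("%#x %#x\n", m[0], m[1]) // 0x0 0x2
    }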
diff --git a/libgo/go/runtime/os1_linux_mips64x.go b/libgo/go/runtime/os1_linux_mips64x.go
new file mode 100644
index 0000000..701e979
--- /dev/null
+++ b/libgo/go/runtime/os1_linux_mips64x.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips64 mips64le
+// +build linux
+
+package runtime
+
+var sigset_all = sigset{^uint64(0), ^uint64(0)}
+
+func sigaddset(mask *sigset, i int) {
+ (*mask)[(i-1)/64] |= 1 << ((uint32(i) - 1) & 63)
+}
+
+func sigdelset(mask *sigset, i int) {
+ (*mask)[(i-1)/64] &^= 1 << ((uint32(i) - 1) & 63)
+}
+
+func sigfillset(mask *[2]uint64) {
+ (*mask)[0], (*mask)[1] = ^uint64(0), ^uint64(0)
+}
+
+func sigcopyset(mask *sigset, m sigmask) {
+ (*mask)[0] = uint64(m[0]) | uint64(m[1])<<32
+}
diff --git a/libgo/go/runtime/os2_linux_generic.go b/libgo/go/runtime/os2_linux_generic.go
new file mode 100644
index 0000000..01e6c8a
--- /dev/null
+++ b/libgo/go/runtime/os2_linux_generic.go
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !mips64
+// +build !mips64le
+// +build linux
+
+package runtime
+
+const (
+ _SS_DISABLE = 2
+ _NSIG = 65
+ _SI_USER = 0
+ _SIG_BLOCK = 0
+ _SIG_UNBLOCK = 1
+ _SIG_SETMASK = 2
+ _RLIMIT_AS = 9
+)
+
+// It's hard to tease out exactly how big a Sigset is, but
+// rt_sigprocmask crashes if we get it wrong, so if binaries
+// are running, this is right.
+type sigset [2]uint32
+
+type rlimit struct {
+ rlim_cur uintptr
+ rlim_max uintptr
+}
diff --git a/libgo/go/runtime/os2_linux_mips64x.go b/libgo/go/runtime/os2_linux_mips64x.go
new file mode 100644
index 0000000..9a6a92a
--- /dev/null
+++ b/libgo/go/runtime/os2_linux_mips64x.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips64 mips64le
+
+package runtime
+
+const (
+ _SS_DISABLE = 2
+ _NSIG = 65
+ _SI_USER = 0
+ _SIG_BLOCK = 1
+ _SIG_UNBLOCK = 2
+ _SIG_SETMASK = 3
+ _RLIMIT_AS = 6
+)
+
+type sigset [2]uint64
+
+type rlimit struct {
+ rlim_cur uintptr
+ rlim_max uintptr
+}
diff --git a/libgo/go/runtime/os_android.go b/libgo/go/runtime/os_android.go
new file mode 100644
index 0000000..52c8c86
--- /dev/null
+++ b/libgo/go/runtime/os_android.go
@@ -0,0 +1,15 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import _ "unsafe" // for go:cgo_export_static and go:cgo_export_dynamic
+
+// Export the main function.
+//
+// Used by the app package to start all-Go Android apps that are
+// loaded via JNI. See golang.org/x/mobile/app.
+
+//go:cgo_export_static main.main
+//go:cgo_export_dynamic main.main
diff --git a/libgo/go/runtime/os_linux_mips64x.go b/libgo/go/runtime/os_linux_mips64x.go
new file mode 100644
index 0000000..4d2e9e8
--- /dev/null
+++ b/libgo/go/runtime/os_linux_mips64x.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips64 mips64le
+// +build linux
+
+package runtime
+
+var randomNumber uint32
+
+//go:nosplit
+func cputicks() int64 {
+ // Currently cputicks() is used in the blocking profiler and to seed fastrand1().
+ // nanotime() is a poor approximation of CPU ticks that is good enough for the profiler.
+ // randomNumber provides better seeding of fastrand1.
+ return nanotime() + int64(randomNumber)
+}
diff --git a/libgo/go/runtime/pprof/mprof_test.go b/libgo/go/runtime/pprof/mprof_test.go
index 44a3850..bfa7b3b 100644
--- a/libgo/go/runtime/pprof/mprof_test.go
+++ b/libgo/go/runtime/pprof/mprof_test.go
@@ -22,11 +22,8 @@ func allocateTransient1M() {
}
}
+//go:noinline
func allocateTransient2M() {
- // prevent inlining
- if memSink == nil {
- panic("bad")
- }
memSink = make([]byte, 2<<20)
}
@@ -75,21 +72,22 @@ func TestMemoryProfiler(t *testing.T) {
memoryProfilerRun++
tests := []string{
+
fmt.Sprintf(`%v: %v \[%v: %v\] @ 0x[0-9,a-f x]+
-# 0x[0-9,a-f]+ pprof_test\.allocatePersistent1K\+0x[0-9,a-f]+ .*/mprof_test\.go:43
-# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:66
+# 0x[0-9,a-f]+ pprof_test\.allocatePersistent1K\+0x[0-9,a-f]+ .*/mprof_test\.go:40
+# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test\.go:63
`, 32*memoryProfilerRun, 1024*memoryProfilerRun, 32*memoryProfilerRun, 1024*memoryProfilerRun),
fmt.Sprintf(`0: 0 \[%v: %v\] @ 0x[0-9,a-f x]+
# 0x[0-9,a-f]+ pprof_test\.allocateTransient1M\+0x[0-9,a-f]+ .*/mprof_test.go:21
-# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:64
+# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:61
`, (1<<10)*memoryProfilerRun, (1<<20)*memoryProfilerRun),
// This should start with "0: 0" but gccgo's imprecise
// GC means that sometimes the value is not collected.
fmt.Sprintf(`(0|%v): (0|%v) \[%v: %v\] @ 0x[0-9,a-f x]+
-# 0x[0-9,a-f]+ pprof_test\.allocateTransient2M\+0x[0-9,a-f]+ .*/mprof_test.go:30
-# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:65
+# 0x[0-9,a-f]+ pprof_test\.allocateTransient2M\+0x[0-9,a-f]+ .*/mprof_test.go:27
+# 0x[0-9,a-f]+ runtime_pprof_test\.TestMemoryProfiler\+0x[0-9,a-f]+ .*/mprof_test.go:62
`, memoryProfilerRun, (2<<20)*memoryProfilerRun, memoryProfilerRun, (2<<20)*memoryProfilerRun),
}
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index dcf67cd..fa11fda 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -20,8 +20,8 @@ import (
"text/tabwriter"
)
-// BUG(rsc): Profiles are incomplete and inaccurate on NetBSD and OS X.
-// See https://golang.org/issue/6047 for details.
+// BUG(rsc): Profiles are only as good as the kernel support used to generate them.
+// See https://golang.org/issue/13841 for details about known problems.
// A Profile is a collection of stack traces showing the call sequences
// that led to instances of a particular event, such as allocation.
@@ -579,6 +579,14 @@ var cpu struct {
// StartCPUProfile enables CPU profiling for the current process.
// While profiling, the profile will be buffered and written to w.
// StartCPUProfile returns an error if profiling is already enabled.
+//
+// On Unix-like systems, StartCPUProfile does not work by default for
+// Go code built with -buildmode=c-archive or -buildmode=c-shared.
+// StartCPUProfile relies on the SIGPROF signal, but that signal will
+// be delivered to the main program's SIGPROF signal handler (if any)
+// not to the one used by Go. To make it work, call os/signal.Notify
+// for syscall.SIGPROF, but note that doing so may break any profiling
+// being done by the main program.
func StartCPUProfile(w io.Writer) error {
// The runtime routines allow a variable profiling rate,
// but in practice operating systems cannot trigger signals
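A sketch of the workaround the new doc comment describes, for a program whose
host installed its own SIGPROF handler first; the output file name is
illustrative:

    package main

    import (
    	"os"
    	"os/signal"
    	"runtime/pprof"
    	"syscall"
    )

    func main() {
    	// Route SIGPROF to Go even though the host program's handler
    	// was installed first (this may break the host's profiler).
    	signal.Notify(make(chan os.Signal, 1), syscall.SIGPROF)

    	f, err := os.Create("cpu.prof")
    	if err != nil {
    		panic(err)
    	}
    	defer f.Close()
    	if err := pprof.StartCPUProfile(f); err != nil {
    		panic(err)
    	}
    	defer pprof.StopCPUProfile()
    	// ... work to be profiled ...
    }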
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index c32b847..244be05 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -23,14 +23,14 @@ import (
"unsafe"
)
-func cpuHogger(f func()) {
+func cpuHogger(f func(), dur time.Duration) {
// We only need to get one 100 Hz clock tick, so we've got
- // a 25x safety buffer.
+ // a large safety buffer.
// But do at least 500 iterations (which should take about 100ms),
// otherwise TestCPUProfileMultithreaded can fail if only one
- // thread is scheduled during the 250ms period.
+ // thread is scheduled during the testing period.
t0 := time.Now()
- for i := 0; i < 500 || time.Since(t0) < 250*time.Millisecond; i++ {
+ for i := 0; i < 500 || time.Since(t0) < dur; i++ {
f()
}
}
@@ -68,20 +68,20 @@ func cpuHog2() {
}
func TestCPUProfile(t *testing.T) {
- testCPUProfile(t, []string{"pprof_test.cpuHog1"}, func() {
- cpuHogger(cpuHog1)
+ testCPUProfile(t, []string{"pprof_test.cpuHog1"}, func(dur time.Duration) {
+ cpuHogger(cpuHog1, dur)
})
}
func TestCPUProfileMultithreaded(t *testing.T) {
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
- testCPUProfile(t, []string{"pprof_test.cpuHog1", "pprof_test.cpuHog2"}, func() {
+ testCPUProfile(t, []string{"pprof_test.cpuHog1", "pprof_test.cpuHog2"}, func(dur time.Duration) {
c := make(chan int)
go func() {
- cpuHogger(cpuHog1)
+ cpuHogger(cpuHog1, dur)
c <- 1
}()
- cpuHogger(cpuHog2)
+ cpuHogger(cpuHog2, dur)
<-c
})
}
@@ -92,8 +92,8 @@ func parseProfile(t *testing.T, bytes []byte, f func(uintptr, []uintptr)) {
val := *(*[]uintptr)(unsafe.Pointer(&bytes))
val = val[:l]
- // 5 for the header, 2 for the per-sample header on at least one sample, 3 for the trailer.
- if l < 5+2+3 {
+ // 5 for the header, 3 for the trailer.
+ if l < 5+3 {
t.Logf("profile too short: %#x", val)
if badOS[runtime.GOOS] {
t.Skipf("ignoring failure on %s; see golang.org/issue/6047", runtime.GOOS)
@@ -120,7 +120,7 @@ func parseProfile(t *testing.T, bytes []byte, f func(uintptr, []uintptr)) {
}
}
-func testCPUProfile(t *testing.T, need []string, f func()) {
+func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
switch runtime.GOOS {
case "darwin":
switch runtime.GOARCH {
@@ -138,12 +138,55 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
t.Skip("skipping on plan9")
}
- var prof bytes.Buffer
- if err := StartCPUProfile(&prof); err != nil {
- t.Fatal(err)
+ const maxDuration = 5 * time.Second
+ // If we're running a long test, start with a long duration
+ // because some of the tests (e.g., TestStackBarrierProfiling)
+ // are trying to make sure something *doesn't* happen.
+ duration := 5 * time.Second
+ if testing.Short() {
+ duration = 200 * time.Millisecond
+ }
+
+ // Profiling tests are inherently flaky, especially on a
+ // loaded system, such as when this test is running with
+ // several others under go test std. If a test fails in a way
+ // that could mean it just didn't run long enough, try with a
+ // longer duration.
+ for duration <= maxDuration {
+ var prof bytes.Buffer
+ if err := StartCPUProfile(&prof); err != nil {
+ t.Fatal(err)
+ }
+ f(duration)
+ StopCPUProfile()
+
+ if profileOk(t, need, prof, duration) {
+ return
+ }
+
+ duration *= 2
+ if duration <= maxDuration {
+ t.Logf("retrying with %s duration", duration)
+ }
+ }
+
+ if badOS[runtime.GOOS] {
+ t.Skipf("ignoring failure on %s; see golang.org/issue/6047", runtime.GOOS)
+ return
}
- f()
- StopCPUProfile()
+ // Ignore the failure if the tests are running in a QEMU-based emulator;
+ // QEMU is not perfect at emulating everything.
+ // The IN_QEMU environment variable is set by some of the Go builders.
+ // IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
+ if os.Getenv("IN_QEMU") == "1" {
+ t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
+ return
+ }
+ t.FailNow()
+}
+
+func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
+ ok = true
// Check that profile is well formed and contains need.
have := make([]uintptr, len(need))
@@ -161,6 +204,10 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
have[i] += count
}
}
+ if strings.Contains(f.Name(), "stackBarrier") {
+ // The runtime should have unwound this.
+ t.Fatalf("profile includes stackBarrier")
+ }
}
})
t.Logf("total %d CPU profile samples collected", samples)
@@ -169,11 +216,18 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
// On some windows machines we end up with
// not enough samples due to coarse timer
// resolution. Let it go.
- t.Skip("too few samples on Windows (golang.org/issue/10842)")
+ t.Log("too few samples on Windows (golang.org/issue/10842)")
+ return false
+ }
+
+ // Check that we got a reasonable number of samples.
+ if ideal := uintptr(duration * 100 / time.Second); samples == 0 || samples < ideal/4 {
+ t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
+ ok = false
}
if len(need) == 0 {
- return
+ return ok
}
var total uintptr
@@ -181,9 +235,8 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
total += have[i]
t.Logf("%s: %d\n", name, have[i])
}
- ok := true
if total == 0 {
- t.Logf("no CPU profile samples collected")
+ t.Logf("no samples in expected functions")
ok = false
}
// We'd like to check a reasonable minimum, like
@@ -197,22 +250,7 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
ok = false
}
}
-
- if !ok {
- if badOS[runtime.GOOS] {
- t.Skipf("ignoring failure on %s; see golang.org/issue/6047", runtime.GOOS)
- return
- }
- // Ignore the failure if the tests are running in a QEMU-based emulator,
- // QEMU is not perfect at emulating everything.
- // IN_QEMU environmental variable is set by some of the Go builders.
- // IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
- if os.Getenv("IN_QEMU") == "1" {
- t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
- return
- }
- t.FailNow()
- }
+ return ok
}
// Fork can hang if preempted with signals frequently enough (see issue 5517).
@@ -307,8 +345,8 @@ func TestGoroutineSwitch(t *testing.T) {
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
- testCPUProfile(t, nil, func() {
- t := time.After(5 * time.Second)
+ testCPUProfile(t, nil, func(duration time.Duration) {
+ t := time.After(duration)
pi := new(big.Int)
for {
for i := 0; i < 100; i++ {
@@ -325,6 +363,64 @@ func TestMathBigDivide(t *testing.T) {
})
}
+func TestStackBarrierProfiling(t *testing.T) {
+ if (runtime.GOOS == "linux" && runtime.GOARCH == "arm") || runtime.GOOS == "openbsd" || runtime.GOOS == "solaris" || runtime.GOOS == "dragonfly" || runtime.GOOS == "freebsd" {
+ // This test currently triggers a large number of
+ // usleep(100)s. These kernels/arches have poor
+ // resolution timers, so this gives up a whole
+ // scheduling quantum. On Linux and the BSDs (and
+ // probably Solaris), profiling signals are only
+ // generated when a process completes a whole
+ // scheduling quantum, so this test often gets zero
+ // profiling signals and fails.
+ t.Skipf("low resolution timers inhibit profiling signals (golang.org/issue/13405)")
+ return
+ }
+
+ if !strings.Contains(os.Getenv("GODEBUG"), "gcstackbarrierall=1") {
+ // Re-execute this test with constant GC and stack
+ // barriers at every frame.
+ testenv.MustHaveExec(t)
+ if runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" {
+ t.Skip("gcstackbarrierall doesn't work on ppc64")
+ }
+ args := []string{"-test.run=TestStackBarrierProfiling"}
+ if testing.Short() {
+ args = append(args, "-test.short")
+ }
+ cmd := exec.Command(os.Args[0], args...)
+ cmd.Env = append([]string{"GODEBUG=gcstackbarrierall=1", "GOGC=1"}, os.Environ()...)
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Fatalf("subprocess failed with %v:\n%s", err, out)
+ }
+ return
+ }
+
+ testCPUProfile(t, nil, func(duration time.Duration) {
+ // In long mode, we're likely to get one or two
+ // samples in stackBarrier.
+ t := time.After(duration)
+ for {
+ deepStack(1000)
+ select {
+ case <-t:
+ return
+ default:
+ }
+ }
+ })
+}
+
+var x []byte
+
+func deepStack(depth int) int {
+ if depth == 0 {
+ return 0
+ }
+ x = make([]byte, 1024)
+ return deepStack(depth-1) + 1
+}
+
// Operating systems that are expected to fail the tests. See issue 6047.
var badOS = map[string]bool{
"darwin": true,
diff --git a/libgo/go/runtime/print.go b/libgo/go/runtime/print.go
new file mode 100644
index 0000000..f789f89
--- /dev/null
+++ b/libgo/go/runtime/print.go
@@ -0,0 +1,221 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// The compiler knows that a print of a value of this type
+// should use printhex instead of printuint (decimal).
+type hex uint64
+
+func bytes(s string) (ret []byte) {
+ rp := (*slice)(unsafe.Pointer(&ret))
+ sp := stringStructOf(&s)
+ rp.array = sp.str
+ rp.len = sp.len
+ rp.cap = sp.len
+ return
+}
+
+var debuglock mutex
+
+// The compiler emits calls to printlock and printunlock around
+// the multiple calls that implement a single Go print or println
+// statement. Some of the print helpers (printsp, for example)
+// call print recursively. There is also the problem of a crash
+// happening during the print routines and needing to acquire
+// the print lock to print information about the crash.
+// For both these reasons, let a thread acquire the printlock 'recursively'.
+
+func printlock() {
+ mp := getg().m
+ mp.locks++ // do not reschedule between printlock++ and lock(&debuglock).
+ mp.printlock++
+ if mp.printlock == 1 {
+ lock(&debuglock)
+ }
+ mp.locks-- // now we know debuglock is held and holding up mp.locks for us.
+}
+
+func printunlock() {
+ mp := getg().m
+ mp.printlock--
+ if mp.printlock == 0 {
+ unlock(&debuglock)
+ }
+}
+
+// gwrite writes to the goroutine-local buffer if output is being
+// diverted; otherwise it writes to standard error.
+func gwrite(b []byte) {
+ if len(b) == 0 {
+ return
+ }
+ gp := getg()
+ if gp == nil || gp.writebuf == nil {
+ writeErr(b)
+ return
+ }
+
+ n := copy(gp.writebuf[len(gp.writebuf):cap(gp.writebuf)], b)
+ gp.writebuf = gp.writebuf[:len(gp.writebuf)+n]
+}
+
+func printsp() {
+ print(" ")
+}
+
+func printnl() {
+ print("\n")
+}
+
+func printbool(v bool) {
+ if v {
+ print("true")
+ } else {
+ print("false")
+ }
+}
+
+func printfloat(v float64) {
+ switch {
+ case v != v:
+ print("NaN")
+ return
+ case v+v == v && v > 0:
+ print("+Inf")
+ return
+ case v+v == v && v < 0:
+ print("-Inf")
+ return
+ }
+
+ const n = 7 // digits printed
+ var buf [n + 7]byte
+ buf[0] = '+'
+ e := 0 // exp
+ if v == 0 {
+ if 1/v < 0 {
+ buf[0] = '-'
+ }
+ } else {
+ if v < 0 {
+ v = -v
+ buf[0] = '-'
+ }
+
+ // normalize
+ for v >= 10 {
+ e++
+ v /= 10
+ }
+ for v < 1 {
+ e--
+ v *= 10
+ }
+
+ // round
+ h := 5.0
+ for i := 0; i < n; i++ {
+ h /= 10
+ }
+ v += h
+ if v >= 10 {
+ e++
+ v /= 10
+ }
+ }
+
+ // format +d.dddd+edd
+ for i := 0; i < n; i++ {
+ s := int(v)
+ buf[i+2] = byte(s + '0')
+ v -= float64(s)
+ v *= 10
+ }
+ buf[1] = buf[2]
+ buf[2] = '.'
+
+ buf[n+2] = 'e'
+ buf[n+3] = '+'
+ if e < 0 {
+ e = -e
+ buf[n+3] = '-'
+ }
+
+ buf[n+4] = byte(e/100) + '0'
+ buf[n+5] = byte(e/10)%10 + '0'
+ buf[n+6] = byte(e%10) + '0'
+ gwrite(buf[:])
+}
+
+func printcomplex(c complex128) {
+ print("(", real(c), imag(c), "i)")
+}
+
+func printuint(v uint64) {
+ var buf [100]byte
+ i := len(buf)
+ for i--; i > 0; i-- {
+ buf[i] = byte(v%10 + '0')
+ if v < 10 {
+ break
+ }
+ v /= 10
+ }
+ gwrite(buf[i:])
+}
+
+func printint(v int64) {
+ if v < 0 {
+ print("-")
+ v = -v
+ }
+ printuint(uint64(v))
+}
+
+func printhex(v uint64) {
+ const dig = "0123456789abcdef"
+ var buf [100]byte
+ i := len(buf)
+ for i--; i > 0; i-- {
+ buf[i] = dig[v%16]
+ if v < 16 {
+ break
+ }
+ v /= 16
+ }
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+ gwrite(buf[i:])
+}
+
+func printpointer(p unsafe.Pointer) {
+ printhex(uint64(uintptr(p)))
+}
+
+func printstring(s string) {
+ if uintptr(len(s)) > maxstring {
+ gwrite(bytes("[string too long]"))
+ return
+ }
+ gwrite(bytes(s))
+}
+
+func printslice(s []byte) {
+ sp := (*slice)(unsafe.Pointer(&s))
+ print("[", len(s), "/", cap(s), "]")
+ printpointer(unsafe.Pointer(sp.array))
+}
+
+func printeface(e eface) {
+ print("(", e._type, ",", e.data, ")")
+}
+
+func printiface(i iface) {
+ print("(", i.tab, ",", i.data, ")")
+}
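The recursive acquisition that printlock/printunlock implement can be modeled
outside the runtime. In this sketch the depth counter stands in for
m.printlock; the simplifying assumption is that lock and unlock are called by
one thread at a time, whereas the real counter lives on the M and is therefore
per-thread by construction:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type printLock struct {
    	mu    sync.Mutex
    	depth int // like m.printlock: nesting count of the owner
    }

    func (l *printLock) lock() {
    	l.depth++
    	if l.depth == 1 {
    		l.mu.Lock() // only the outermost call takes the mutex
    	}
    }

    func (l *printLock) unlock() {
    	l.depth--
    	if l.depth == 0 {
    		l.mu.Unlock()
    	}
    }

    func main() {
    	var l printLock
    	l.lock()
    	l.lock() // nested acquire does not deadlock
    	fmt.Println("printing under a recursively held lock")
    	l.unlock()
    	l.unlock()
    }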
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index 4350e8f..37adad5 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -6,8 +6,10 @@ package runtime_test
import (
"math"
+ "net"
"runtime"
"runtime/debug"
+ "strings"
"sync"
"sync/atomic"
"syscall"
@@ -132,6 +134,79 @@ func TestGoroutineParallelism(t *testing.T) {
}
}
+// Test that all runnable goroutines are scheduled at the same time.
+func TestGoroutineParallelism2(t *testing.T) {
+ //testGoroutineParallelism2(t, false, false)
+ testGoroutineParallelism2(t, true, false)
+ testGoroutineParallelism2(t, false, true)
+ testGoroutineParallelism2(t, true, true)
+}
+
+func testGoroutineParallelism2(t *testing.T, load, netpoll bool) {
+ if runtime.NumCPU() == 1 {
+ // Takes too long, too easy to deadlock, etc.
+ t.Skip("skipping on uniprocessor")
+ }
+ P := 4
+ N := 10
+ if testing.Short() {
+ N = 3
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
+ // If runtime triggers a forced GC during this test then it will deadlock,
+ // since the goroutines can't be stopped/preempted.
+ // Disable GC for this test (see issue #10958).
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+ for try := 0; try < N; try++ {
+ if load {
+ // Create P goroutines and wait until they all run.
+ // When we run the actual test below, worker threads
+ // running the goroutines will start parking.
+ done := make(chan bool)
+ x := uint32(0)
+ for p := 0; p < P; p++ {
+ go func() {
+ if atomic.AddUint32(&x, 1) == uint32(P) {
+ done <- true
+ return
+ }
+ for atomic.LoadUint32(&x) != uint32(P) {
+ }
+ }()
+ }
+ <-done
+ }
+ if netpoll {
+ // Enable the netpoller; it affects scheduler behavior.
+ ln, err := net.Listen("tcp", "localhost:0")
+ if err == nil {
+ defer ln.Close() // yup, defer in a loop
+ }
+ }
+ done := make(chan bool)
+ x := uint32(0)
+ // Spawn P goroutines in a nested fashion just to differ from TestGoroutineParallelism.
+ for p := 0; p < P/2; p++ {
+ go func(p int) {
+ for p2 := 0; p2 < 2; p2++ {
+ go func(p2 int) {
+ for i := 0; i < 3; i++ {
+ expected := uint32(P*i + p*2 + p2)
+ for atomic.LoadUint32(&x) != expected {
+ }
+ atomic.StoreUint32(&x, expected+1)
+ }
+ done <- true
+ }(p2)
+ }
+ }(p)
+ }
+ for p := 0; p < P; p++ {
+ <-done
+ }
+ }
+}
+
func TestBlockLocked(t *testing.T) {
const N = 10
c := make(chan bool)
@@ -257,47 +332,44 @@ func TestPreemptionGC(t *testing.T) {
}
func TestGCFairness(t *testing.T) {
- output := executeTest(t, testGCFairnessSource, nil)
+ output := runTestProg(t, "testprog", "GCFairness")
want := "OK\n"
if output != want {
t.Fatalf("want %s, got %s\n", want, output)
}
}
-const testGCFairnessSource = `
-package main
+func TestNumGoroutine(t *testing.T) {
+ output := runTestProg(t, "testprog", "NumGoroutine")
+ want := "1\n"
+ if output != want {
+ t.Fatalf("want %q, got %q", want, output)
+ }
-import (
- "fmt"
- "os"
- "runtime"
- "time"
-)
+ buf := make([]byte, 1<<20)
-func main() {
- runtime.GOMAXPROCS(1)
- f, err := os.Open("/dev/null")
- if os.IsNotExist(err) {
- // This test tests what it is intended to test only if writes are fast.
- // If there is no /dev/null, we just don't execute the test.
- fmt.Println("OK")
- return
- }
- if err != nil {
- fmt.Println(err)
- os.Exit(1)
- }
- for i := 0; i < 2; i++ {
- go func() {
- for {
- f.Write([]byte("."))
- }
- }()
+ // Try up to 10 times for a match before giving up.
+ // This is a fundamentally racy check but it's important
+ // to notice if NumGoroutine and Stack are _always_ out of sync.
+ for i := 0; ; i++ {
+ // Give goroutines about to exit a chance to exit.
+ // The NumGoroutine and Stack below need to see
+ // the same state of the world, so anything we can do
+ // to keep it quiet is good.
+ runtime.Gosched()
+
+ n := runtime.NumGoroutine()
+ buf = buf[:runtime.Stack(buf, true)]
+
+ nstk := strings.Count(string(buf), "goroutine ")
+ if n == nstk {
+ break
+ }
+ if i >= 10 {
+ t.Fatalf("NumGoroutine=%d, but found %d goroutines in stack dump: %s", n, nstk, buf)
+ }
}
- time.Sleep(10 * time.Millisecond)
- fmt.Println("OK")
}
-`
func TestPingPongHog(t *testing.T) {
if testing.Short() {
diff --git a/libgo/go/runtime/race/testdata/issue12225_test.go b/libgo/go/runtime/race/testdata/issue12225_test.go
new file mode 100644
index 0000000..0494493
--- /dev/null
+++ b/libgo/go/runtime/race/testdata/issue12225_test.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import "unsafe"
+
+// golang.org/issue/12225
+// The test is that this compiles at all.
+
+//go:noinline
+func convert(s string) []byte {
+ return []byte(s)
+}
+
+func issue12225() {
+ println(*(*int)(unsafe.Pointer(&convert("")[0])))
+ println(*(*int)(unsafe.Pointer(&[]byte("")[0])))
+}
diff --git a/libgo/go/runtime/race/testdata/issue12664_test.go b/libgo/go/runtime/race/testdata/issue12664_test.go
new file mode 100644
index 0000000..c9f790e
--- /dev/null
+++ b/libgo/go/runtime/race/testdata/issue12664_test.go
@@ -0,0 +1,76 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+import (
+ "fmt"
+ "testing"
+)
+
+var issue12664 = "hi"
+
+func TestRaceIssue12664(t *testing.T) {
+ c := make(chan struct{})
+ go func() {
+ issue12664 = "bye"
+ close(c)
+ }()
+ fmt.Println(issue12664)
+ <-c
+}
+
+type MyI interface {
+ foo()
+}
+
+type MyT int
+
+func (MyT) foo() {
+}
+
+var issue12664_2 MyT = 0
+
+func TestRaceIssue12664_2(t *testing.T) {
+ c := make(chan struct{})
+ go func() {
+ issue12664_2 = 1
+ close(c)
+ }()
+ func(x MyI) {
+ // Never true, but prevents inlining.
+ if x.(MyT) == -1 {
+ close(c)
+ }
+ }(issue12664_2)
+ <-c
+}
+
+var issue12664_3 MyT = 0
+
+func TestRaceIssue12664_3(t *testing.T) {
+ c := make(chan struct{})
+ go func() {
+ issue12664_3 = 1
+ close(c)
+ }()
+ var r MyT
+ var i interface{} = r
+ issue12664_3 = i.(MyT)
+ <-c
+}
+
+var issue12664_4 MyT = 0
+
+func TestRaceIssue12664_4(t *testing.T) {
+ c := make(chan struct{})
+ go func() {
+ issue12664_4 = 1
+ close(c)
+ }()
+ var r MyT
+ var i MyI = r
+ issue12664_4 = i.(MyT)
+ <-c
+}
diff --git a/libgo/go/runtime/race/testdata/issue13264_test.go b/libgo/go/runtime/race/testdata/issue13264_test.go
new file mode 100644
index 0000000..d42290d
--- /dev/null
+++ b/libgo/go/runtime/race/testdata/issue13264_test.go
@@ -0,0 +1,13 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package race_test
+
+// golang.org/issue/13264
+// The test is that this compiles at all.
+
+func issue13264() {
+ for ; ; []map[int]int{}[0][0] = 0 {
+ }
+}
diff --git a/libgo/go/runtime/runtime_test.go b/libgo/go/runtime/runtime_test.go
index bb8ff71..980a9f8 100644
--- a/libgo/go/runtime/runtime_test.go
+++ b/libgo/go/runtime/runtime_test.go
@@ -12,6 +12,13 @@ import (
"unsafe"
)
+func init() {
+ // We're testing the runtime, so make tracebacks show things
+ // in the runtime. This only raises the level, so it won't
+ // override GOTRACEBACK=crash from the user.
+ SetTracebackEnv("system")
+}
+
var errf error
func errfn() error {
@@ -303,3 +310,15 @@ func TestAppendSliceGrowth(t *testing.T) {
}
}
}
+
+func TestGoroutineProfileTrivial(t *testing.T) {
+ n1, ok := GoroutineProfile(nil) // should fail, there's at least 1 goroutine
+ if n1 < 1 || ok {
+ t.Fatalf("GoroutineProfile(nil) = %d, %v, want >0, false", n1, ok)
+ }
+
+ n2, ok := GoroutineProfile(make([]StackRecord, n1))
+ if n2 != n1 || !ok {
+ t.Fatalf("GoroutineProfile(%d) = %d, %v, want %d, true", n1, n2, ok, n1)
+ }
+}
diff --git a/libgo/go/runtime/signal2_unix.go b/libgo/go/runtime/signal2_unix.go
new file mode 100644
index 0000000..3fe625f
--- /dev/null
+++ b/libgo/go/runtime/signal2_unix.go
@@ -0,0 +1,69 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package runtime
+
+import "unsafe"
+
+//go:noescape
+func sigfwd(fn uintptr, sig uint32, info *siginfo, ctx unsafe.Pointer)
+
+// Determines if the signal should be handled by Go and if not, forwards the
+// signal to the handler that was installed before Go's. Returns whether the
+// signal was forwarded.
+// This is called by the signal handler, and the world may be stopped.
+//go:nosplit
+//go:nowritebarrierrec
+func sigfwdgo(sig uint32, info *siginfo, ctx unsafe.Pointer) bool {
+ if sig >= uint32(len(sigtable)) {
+ return false
+ }
+ fwdFn := fwdSig[sig]
+
+ if !signalsOK {
+ // The only way we can get here is if we are in a
+ // library or archive, installed a signal handler at
+ // program startup, and the Go runtime has not yet
+ // been initialized.
+ if fwdFn == _SIG_DFL {
+ dieFromSignal(int32(sig))
+ } else {
+ sigfwd(fwdFn, sig, info, ctx)
+ }
+ return true
+ }
+
+ flags := sigtable[sig].flags
+
+ // If there is no handler to forward to, no need to forward.
+ if fwdFn == _SIG_DFL {
+ return false
+ }
+
+ // If we aren't handling the signal, forward it.
+ if flags&_SigHandling == 0 {
+ sigfwd(fwdFn, sig, info, ctx)
+ return true
+ }
+
+ // Only forward synchronous signals.
+ c := &sigctxt{info, ctx}
+ if c.sigcode() == _SI_USER || flags&_SigPanic == 0 {
+ return false
+ }
+ // Determine if the signal occurred inside Go code. We test that:
+ // (1) we were in a goroutine (i.e., m.curg != nil), and
+ // (2) we weren't in CGO (i.e., m.curg.syscallsp == 0).
+ g := getg()
+ if g != nil && g.m != nil && g.m.curg != nil && g.m.curg.syscallsp == 0 {
+ return false
+ }
+ // Signal not handled by Go, forward it.
+ if fwdFn != _SIG_IGN {
+ sigfwd(fwdFn, sig, info, ctx)
+ }
+ return true
+}
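
The order of the checks in sigfwdgo is the essential part. A runnable
restatement of the decision ladder, with plain booleans standing in for the
runtime state the real function inspects (the parameter names are hypothetical
stand-ins, not runtime identifiers):

    // shouldForward mirrors sigfwdgo's check order only.
    func shouldForward(knownSig, runtimeUp, hasOldHandler, goHandling, panicSig, faultInGo bool) bool {
        if !knownSig {
            return false // outside sigtable: never forward
        }
        if !runtimeUp {
            return true // pre-init library/archive: forward (or die on SIG_DFL)
        }
        if !hasOldHandler {
            return false // old handler was SIG_DFL: nothing to forward to
        }
        if !goHandling {
            return true // Go never installed a handler for this signal
        }
        if !panicSig {
            return false // user-raised or async signal: Go keeps it
        }
        if faultInGo {
            return false // synchronous fault in Go code becomes a panic
        }
        return true // synchronous fault in C code: forward (unless SIG_IGN)
    }
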
diff --git a/libgo/go/runtime/signal_linux_mips64x.go b/libgo/go/runtime/signal_linux_mips64x.go
new file mode 100644
index 0000000..671b916
--- /dev/null
+++ b/libgo/go/runtime/signal_linux_mips64x.go
@@ -0,0 +1,70 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips64 mips64le
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *sigcontext { return &(*ucontext)(c.ctxt).uc_mcontext }
+func (c *sigctxt) r0() uint64 { return c.regs().sc_regs[0] }
+func (c *sigctxt) r1() uint64 { return c.regs().sc_regs[1] }
+func (c *sigctxt) r2() uint64 { return c.regs().sc_regs[2] }
+func (c *sigctxt) r3() uint64 { return c.regs().sc_regs[3] }
+func (c *sigctxt) r4() uint64 { return c.regs().sc_regs[4] }
+func (c *sigctxt) r5() uint64 { return c.regs().sc_regs[5] }
+func (c *sigctxt) r6() uint64 { return c.regs().sc_regs[6] }
+func (c *sigctxt) r7() uint64 { return c.regs().sc_regs[7] }
+func (c *sigctxt) r8() uint64 { return c.regs().sc_regs[8] }
+func (c *sigctxt) r9() uint64 { return c.regs().sc_regs[9] }
+func (c *sigctxt) r10() uint64 { return c.regs().sc_regs[10] }
+func (c *sigctxt) r11() uint64 { return c.regs().sc_regs[11] }
+func (c *sigctxt) r12() uint64 { return c.regs().sc_regs[12] }
+func (c *sigctxt) r13() uint64 { return c.regs().sc_regs[13] }
+func (c *sigctxt) r14() uint64 { return c.regs().sc_regs[14] }
+func (c *sigctxt) r15() uint64 { return c.regs().sc_regs[15] }
+func (c *sigctxt) r16() uint64 { return c.regs().sc_regs[16] }
+func (c *sigctxt) r17() uint64 { return c.regs().sc_regs[17] }
+func (c *sigctxt) r18() uint64 { return c.regs().sc_regs[18] }
+func (c *sigctxt) r19() uint64 { return c.regs().sc_regs[19] }
+func (c *sigctxt) r20() uint64 { return c.regs().sc_regs[20] }
+func (c *sigctxt) r21() uint64 { return c.regs().sc_regs[21] }
+func (c *sigctxt) r22() uint64 { return c.regs().sc_regs[22] }
+func (c *sigctxt) r23() uint64 { return c.regs().sc_regs[23] }
+func (c *sigctxt) r24() uint64 { return c.regs().sc_regs[24] }
+func (c *sigctxt) r25() uint64 { return c.regs().sc_regs[25] }
+func (c *sigctxt) r26() uint64 { return c.regs().sc_regs[26] }
+func (c *sigctxt) r27() uint64 { return c.regs().sc_regs[27] }
+func (c *sigctxt) r28() uint64 { return c.regs().sc_regs[28] }
+func (c *sigctxt) r29() uint64 { return c.regs().sc_regs[29] }
+func (c *sigctxt) r30() uint64 { return c.regs().sc_regs[30] }
+func (c *sigctxt) r31() uint64 { return c.regs().sc_regs[31] }
+func (c *sigctxt) sp() uint64 { return c.regs().sc_regs[29] }
+func (c *sigctxt) pc() uint64 { return c.regs().sc_pc }
+func (c *sigctxt) link() uint64 { return c.regs().sc_regs[31] }
+func (c *sigctxt) lo() uint64 { return c.regs().sc_mdlo }
+func (c *sigctxt) hi() uint64 { return c.regs().sc_mdhi }
+
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
+
+func (c *sigctxt) set_r30(x uint64) { c.regs().sc_regs[30] = x }
+func (c *sigctxt) set_pc(x uint64) { c.regs().sc_pc = x }
+func (c *sigctxt) set_sp(x uint64) { c.regs().sc_regs[29] = x }
+func (c *sigctxt) set_link(x uint64) { c.regs().sc_regs[31] = x }
+
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+}
diff --git a/libgo/go/runtime/signal_mips64x.go b/libgo/go/runtime/signal_mips64x.go
new file mode 100644
index 0000000..77c2714
--- /dev/null
+++ b/libgo/go/runtime/signal_mips64x.go
@@ -0,0 +1,188 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+// +build mips64 mips64le
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+func dumpregs(c *sigctxt) {
+ print("r0 ", hex(c.r0()), "\t")
+ print("r1 ", hex(c.r1()), "\n")
+ print("r2 ", hex(c.r2()), "\t")
+ print("r3 ", hex(c.r3()), "\n")
+ print("r4 ", hex(c.r4()), "\t")
+ print("r5 ", hex(c.r5()), "\n")
+ print("r6 ", hex(c.r6()), "\t")
+ print("r7 ", hex(c.r7()), "\n")
+ print("r8 ", hex(c.r8()), "\t")
+ print("r9 ", hex(c.r9()), "\n")
+ print("r10 ", hex(c.r10()), "\t")
+ print("r11 ", hex(c.r11()), "\n")
+ print("r12 ", hex(c.r12()), "\t")
+ print("r13 ", hex(c.r13()), "\n")
+ print("r14 ", hex(c.r14()), "\t")
+ print("r15 ", hex(c.r15()), "\n")
+ print("r16 ", hex(c.r16()), "\t")
+ print("r17 ", hex(c.r17()), "\n")
+ print("r18 ", hex(c.r18()), "\t")
+ print("r19 ", hex(c.r19()), "\n")
+ print("r20 ", hex(c.r20()), "\t")
+ print("r21 ", hex(c.r21()), "\n")
+ print("r22 ", hex(c.r22()), "\t")
+ print("r23 ", hex(c.r23()), "\n")
+ print("r24 ", hex(c.r24()), "\t")
+ print("r25 ", hex(c.r25()), "\n")
+ print("r26 ", hex(c.r26()), "\t")
+ print("r27 ", hex(c.r27()), "\n")
+ print("r28 ", hex(c.r28()), "\t")
+ print("r29 ", hex(c.r29()), "\n")
+ print("r30 ", hex(c.r30()), "\t")
+ print("r31 ", hex(c.r31()), "\n")
+ print("pc ", hex(c.pc()), "\t")
+ print("link ", hex(c.link()), "\n")
+ print("lo ", hex(c.lo()), "\t")
+ print("hi ", hex(c.hi()), "\n")
+}
+
+var crashing int32
+
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
+ _g_ := getg()
+ c := &sigctxt{info, ctxt}
+
+ if sig == _SIGPROF {
+ sigprof(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.link()), gp, _g_.m)
+ return
+ }
+ flags := int32(_SigThrow)
+ if sig < uint32(len(sigtable)) {
+ flags = sigtable[sig].flags
+ }
+ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp.sig = sig
+ gp.sigcode0 = uintptr(c.sigcode())
+ gp.sigcode1 = uintptr(c.sigaddr())
+ gp.sigpc = uintptr(c.pc())
+
+ // We arrange link and pc to pretend that the panicking
+ // function calls sigpanic directly.
+ // Always save LINK to stack so that panics in leaf
+ // functions are correctly handled. This smashes
+ // the stack frame but we're not going back there
+ // anyway.
+ sp := c.sp() - sys.PtrSize
+ c.set_sp(sp)
+ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
+
+ pc := uintptr(gp.sigpc)
+
+ // If we don't recognize the PC as code
+ // but we do recognize the link register as code,
+ // then assume this was a call to non-code and treat it
+ // like pc == 0, so that unwinding shows the context.
+ if pc != 0 && findfunc(pc) == nil && findfunc(uintptr(c.link())) != nil {
+ pc = 0
+ }
+
+ // Don't bother saving PC if it's zero, which is
+ // probably a call to a nil func: the old link register
+ // is more useful in the stack trace.
+ if pc != 0 {
+ c.set_link(uint64(pc))
+ }
+
+ // In case we are panicking from external C code
+ c.set_r30(uint64(uintptr(unsafe.Pointer(gp))))
+ c.set_pc(uint64(funcPC(sigpanic)))
+ return
+ }
+
+ if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
+ if sigsend(sig) {
+ return
+ }
+ }
+
+ if c.sigcode() == _SI_USER && signal_ignored(sig) {
+ return
+ }
+
+ if flags&_SigKill != 0 {
+ dieFromSignal(int32(sig))
+ }
+
+ if flags&_SigThrow == 0 {
+ return
+ }
+
+ _g_.m.throwing = 1
+ _g_.m.caughtsig.set(gp)
+
+ if crashing == 0 {
+ startpanic()
+ }
+
+ if sig < uint32(len(sigtable)) {
+ print(sigtable[sig].name, "\n")
+ } else {
+ print("Signal ", sig, "\n")
+ }
+
+ print("PC=", hex(c.pc()), " m=", _g_.m.id, "\n")
+ if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
+ print("signal arrived during cgo execution\n")
+ gp = _g_.m.lockedg
+ }
+ print("\n")
+
+ level, _, docrash := gotraceback()
+ if level > 0 {
+ goroutineheader(gp)
+ tracebacktrap(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.link()), gp)
+ if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
+ // tracebackothers on original m skipped this one; trace it now.
+ goroutineheader(_g_.m.curg)
+ traceback(^uintptr(0), ^uintptr(0), 0, gp)
+ } else if crashing == 0 {
+ tracebackothers(gp)
+ print("\n")
+ }
+ dumpregs(c)
+ }
+
+ if docrash {
+ crashing++
+ if crashing < sched.mcount {
+ // There are other m's that need to dump their stacks.
+ // Relay SIGQUIT to the next m by sending it to the current process.
+ // All m's that have already received SIGQUIT have signal masks blocking
+ // receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
+ // When the last m receives the SIGQUIT, it will fall through to the call to
+ // crash below. Just in case the relaying gets botched, each m involved in
+ // the relay sleeps for 5 seconds and then does the crash/exit itself.
+ // In expected operation, the last m has received the SIGQUIT and run
+ // crash/exit and the process is gone, all long before any of the
+ // 5-second sleeps have finished.
+ print("\n-----\n\n")
+ raiseproc(_SIGQUIT)
+ usleep(5 * 1000 * 1000)
+ }
+ crash()
+ }
+
+ exit(2)
+}
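
The effect of the _SigPanic branch is observable from ordinary Go code: a
synchronous fault arrives as a recoverable runtime panic rather than killing
the process. A self-contained illustration (the same path that
testdata/testprog/crash.go exercises later in this patch):

    package main

    import "fmt"

    func main() {
        defer func() {
            fmt.Println("recovered:", recover()) // runtime error: invalid memory address ...
        }()
        var p *int
        _ = *p // SIGSEGV -> sighandler -> sigpanic -> panic
    }
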
diff --git a/libgo/go/runtime/signal_sigtramp.go b/libgo/go/runtime/signal_sigtramp.go
new file mode 100644
index 0000000..00ab038
--- /dev/null
+++ b/libgo/go/runtime/signal_sigtramp.go
@@ -0,0 +1,50 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly linux netbsd
+
+package runtime
+
+import "unsafe"
+
+// Continuation of the (assembly) sigtramp() logic.
+// This may be called with the world stopped.
+//go:nosplit
+//go:nowritebarrierrec
+func sigtrampgo(sig uint32, info *siginfo, ctx unsafe.Pointer) {
+ if sigfwdgo(sig, info, ctx) {
+ return
+ }
+ g := getg()
+ if g == nil {
+ badsignal(uintptr(sig))
+ return
+ }
+
+ // If some non-Go code called sigaltstack, adjust.
+ sp := uintptr(unsafe.Pointer(&sig))
+ if sp < g.m.gsignal.stack.lo || sp >= g.m.gsignal.stack.hi {
+ var st sigaltstackt
+ sigaltstack(nil, &st)
+ if st.ss_flags&_SS_DISABLE != 0 {
+ setg(nil)
+ cgocallback(unsafe.Pointer(funcPC(noSignalStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
+ }
+ stsp := uintptr(unsafe.Pointer(st.ss_sp))
+ if sp < stsp || sp >= stsp+st.ss_size {
+ setg(nil)
+ cgocallback(unsafe.Pointer(funcPC(sigNotOnStack)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
+ }
+ g.m.gsignal.stack.lo = stsp
+ g.m.gsignal.stack.hi = stsp + st.ss_size
+ g.m.gsignal.stackguard0 = stsp + _StackGuard
+ g.m.gsignal.stackguard1 = stsp + _StackGuard
+ g.m.gsignal.stackAlloc = st.ss_size
+ g.m.gsignal.stktopsp = getcallersp(unsafe.Pointer(&sig))
+ }
+
+ setg(g.m.gsignal)
+ sighandler(sig, info, ctx, g)
+ setg(g)
+}
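
The sigaltstack adjustment hinges on a half-open interval test: an address sp
belongs to a stack [lo, hi) iff lo <= sp < hi. In isolation (a trivial sketch):

    func onStack(sp, lo, hi uintptr) bool {
        return lo <= sp && sp < hi
    }

sigtrampgo uses the negation of this to detect that the handler is running
somewhere other than the gsignal stack it knows about.
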
diff --git a/libgo/go/runtime/sigtab_linux_generic.go b/libgo/go/runtime/sigtab_linux_generic.go
new file mode 100644
index 0000000..32c40c4
--- /dev/null
+++ b/libgo/go/runtime/sigtab_linux_generic.go
@@ -0,0 +1,82 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !mips64
+// +build !mips64le
+// +build linux
+
+package runtime
+
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+var sigtable = [...]sigTabT{
+ /* 0 */ {0, "SIGNONE: no trap"},
+ /* 1 */ {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
+ /* 2 */ {_SigNotify + _SigKill, "SIGINT: interrupt"},
+ /* 3 */ {_SigNotify + _SigThrow, "SIGQUIT: quit"},
+ /* 4 */ {_SigThrow + _SigUnblock, "SIGILL: illegal instruction"},
+ /* 5 */ {_SigThrow + _SigUnblock, "SIGTRAP: trace trap"},
+ /* 6 */ {_SigNotify + _SigThrow, "SIGABRT: abort"},
+ /* 7 */ {_SigPanic + _SigUnblock, "SIGBUS: bus error"},
+ /* 8 */ {_SigPanic + _SigUnblock, "SIGFPE: floating-point exception"},
+ /* 9 */ {0, "SIGKILL: kill"},
+ /* 10 */ {_SigNotify, "SIGUSR1: user-defined signal 1"},
+ /* 11 */ {_SigPanic + _SigUnblock, "SIGSEGV: segmentation violation"},
+ /* 12 */ {_SigNotify, "SIGUSR2: user-defined signal 2"},
+ /* 13 */ {_SigNotify, "SIGPIPE: write to broken pipe"},
+ /* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
+ /* 15 */ {_SigNotify + _SigKill, "SIGTERM: termination"},
+ /* 16 */ {_SigThrow + _SigUnblock, "SIGSTKFLT: stack fault"},
+ /* 17 */ {_SigNotify + _SigUnblock, "SIGCHLD: child status has changed"},
+ /* 18 */ {_SigNotify + _SigDefault, "SIGCONT: continue"},
+ /* 19 */ {0, "SIGSTOP: stop, unblockable"},
+ /* 20 */ {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"},
+ /* 21 */ {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"},
+ /* 22 */ {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"},
+ /* 23 */ {_SigNotify, "SIGURG: urgent condition on socket"},
+ /* 24 */ {_SigNotify, "SIGXCPU: cpu limit exceeded"},
+ /* 25 */ {_SigNotify, "SIGXFSZ: file size limit exceeded"},
+ /* 26 */ {_SigNotify, "SIGVTALRM: virtual alarm clock"},
+ /* 27 */ {_SigNotify + _SigUnblock, "SIGPROF: profiling alarm clock"},
+ /* 28 */ {_SigNotify, "SIGWINCH: window size change"},
+ /* 29 */ {_SigNotify, "SIGIO: i/o now possible"},
+ /* 30 */ {_SigNotify, "SIGPWR: power failure restart"},
+ /* 31 */ {_SigNotify, "SIGSYS: bad system call"},
+ /* 32 */ {_SigSetStack + _SigUnblock, "signal 32"}, /* SIGCANCEL; see issue 6997 */
+ /* 33 */ {_SigSetStack + _SigUnblock, "signal 33"}, /* SIGSETXID; see issues 3871, 9400, 12498 */
+ /* 34 */ {_SigNotify, "signal 34"},
+ /* 35 */ {_SigNotify, "signal 35"},
+ /* 36 */ {_SigNotify, "signal 36"},
+ /* 37 */ {_SigNotify, "signal 37"},
+ /* 38 */ {_SigNotify, "signal 38"},
+ /* 39 */ {_SigNotify, "signal 39"},
+ /* 40 */ {_SigNotify, "signal 40"},
+ /* 41 */ {_SigNotify, "signal 41"},
+ /* 42 */ {_SigNotify, "signal 42"},
+ /* 43 */ {_SigNotify, "signal 43"},
+ /* 44 */ {_SigNotify, "signal 44"},
+ /* 45 */ {_SigNotify, "signal 45"},
+ /* 46 */ {_SigNotify, "signal 46"},
+ /* 47 */ {_SigNotify, "signal 47"},
+ /* 48 */ {_SigNotify, "signal 48"},
+ /* 49 */ {_SigNotify, "signal 49"},
+ /* 50 */ {_SigNotify, "signal 50"},
+ /* 51 */ {_SigNotify, "signal 51"},
+ /* 52 */ {_SigNotify, "signal 52"},
+ /* 53 */ {_SigNotify, "signal 53"},
+ /* 54 */ {_SigNotify, "signal 54"},
+ /* 55 */ {_SigNotify, "signal 55"},
+ /* 56 */ {_SigNotify, "signal 56"},
+ /* 57 */ {_SigNotify, "signal 57"},
+ /* 58 */ {_SigNotify, "signal 58"},
+ /* 59 */ {_SigNotify, "signal 59"},
+ /* 60 */ {_SigNotify, "signal 60"},
+ /* 61 */ {_SigNotify, "signal 61"},
+ /* 62 */ {_SigNotify, "signal 62"},
+ /* 63 */ {_SigNotify, "signal 63"},
+ /* 64 */ {_SigNotify, "signal 64"},
+}
diff --git a/libgo/go/runtime/sigtab_linux_mips64x.go b/libgo/go/runtime/sigtab_linux_mips64x.go
new file mode 100644
index 0000000..dbd50f7
--- /dev/null
+++ b/libgo/go/runtime/sigtab_linux_mips64x.go
@@ -0,0 +1,81 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips64 mips64le
+// +build linux
+
+package runtime
+
+type sigTabT struct {
+ flags int32
+ name string
+}
+
+var sigtable = [...]sigTabT{
+ /* 0 */ {0, "SIGNONE: no trap"},
+ /* 1 */ {_SigNotify + _SigKill, "SIGHUP: terminal line hangup"},
+ /* 2 */ {_SigNotify + _SigKill, "SIGINT: interrupt"},
+ /* 3 */ {_SigNotify + _SigThrow, "SIGQUIT: quit"},
+ /* 4 */ {_SigThrow + _SigUnblock, "SIGILL: illegal instruction"},
+ /* 5 */ {_SigThrow + _SigUnblock, "SIGTRAP: trace trap"},
+ /* 6 */ {_SigNotify + _SigThrow, "SIGABRT: abort"},
+ /* 7 */ {_SigThrow, "SIGEMT"},
+ /* 8 */ {_SigPanic + _SigUnblock, "SIGFPE: floating-point exception"},
+ /* 9 */ {0, "SIGKILL: kill"},
+ /* 10 */ {_SigPanic + _SigUnblock, "SIGBUS: bus error"},
+ /* 11 */ {_SigPanic + _SigUnblock, "SIGSEGV: segmentation violation"},
+ /* 12 */ {_SigNotify, "SIGSYS: bad system call"},
+ /* 13 */ {_SigNotify, "SIGPIPE: write to broken pipe"},
+ /* 14 */ {_SigNotify, "SIGALRM: alarm clock"},
+ /* 15 */ {_SigNotify + _SigKill, "SIGTERM: termination"},
+ /* 16 */ {_SigNotify, "SIGUSR1: user-defined signal 1"},
+ /* 17 */ {_SigNotify, "SIGUSR2: user-defined signal 2"},
+ /* 18 */ {_SigNotify + _SigUnblock, "SIGCHLD: child status has changed"},
+ /* 19 */ {_SigNotify, "SIGPWR: power failure restart"},
+ /* 20 */ {_SigNotify, "SIGWINCH: window size change"},
+ /* 21 */ {_SigNotify, "SIGURG: urgent condition on socket"},
+ /* 22 */ {_SigNotify, "SIGIO: i/o now possible"},
+ /* 23 */ {0, "SIGSTOP: stop, unblockable"},
+ /* 24 */ {_SigNotify + _SigDefault, "SIGTSTP: keyboard stop"},
+ /* 25 */ {_SigNotify + _SigDefault, "SIGCONT: continue"},
+ /* 26 */ {_SigNotify + _SigDefault, "SIGTTIN: background read from tty"},
+ /* 27 */ {_SigNotify + _SigDefault, "SIGTTOU: background write to tty"},
+ /* 28 */ {_SigNotify, "SIGVTALRM: virtual alarm clock"},
+ /* 29 */ {_SigNotify + _SigUnblock, "SIGPROF: profiling alarm clock"},
+ /* 30 */ {_SigNotify, "SIGXCPU: cpu limit exceeded"},
+ /* 31 */ {_SigNotify, "SIGXFSZ: file size limit exceeded"},
+ /* 32 */ {_SigSetStack + _SigUnblock, "signal 32"}, /* SIGCANCEL; see issue 6997 */
+ /* 33 */ {_SigSetStack + _SigUnblock, "signal 33"}, /* SIGSETXID; see issues 3871, 9400, 12498 */
+ /* 34 */ {_SigNotify, "signal 34"},
+ /* 35 */ {_SigNotify, "signal 35"},
+ /* 36 */ {_SigNotify, "signal 36"},
+ /* 37 */ {_SigNotify, "signal 37"},
+ /* 38 */ {_SigNotify, "signal 38"},
+ /* 39 */ {_SigNotify, "signal 39"},
+ /* 40 */ {_SigNotify, "signal 40"},
+ /* 41 */ {_SigNotify, "signal 41"},
+ /* 42 */ {_SigNotify, "signal 42"},
+ /* 43 */ {_SigNotify, "signal 43"},
+ /* 44 */ {_SigNotify, "signal 44"},
+ /* 45 */ {_SigNotify, "signal 45"},
+ /* 46 */ {_SigNotify, "signal 46"},
+ /* 47 */ {_SigNotify, "signal 47"},
+ /* 48 */ {_SigNotify, "signal 48"},
+ /* 49 */ {_SigNotify, "signal 49"},
+ /* 50 */ {_SigNotify, "signal 50"},
+ /* 51 */ {_SigNotify, "signal 51"},
+ /* 52 */ {_SigNotify, "signal 52"},
+ /* 53 */ {_SigNotify, "signal 53"},
+ /* 54 */ {_SigNotify, "signal 54"},
+ /* 55 */ {_SigNotify, "signal 55"},
+ /* 56 */ {_SigNotify, "signal 56"},
+ /* 57 */ {_SigNotify, "signal 57"},
+ /* 58 */ {_SigNotify, "signal 58"},
+ /* 59 */ {_SigNotify, "signal 59"},
+ /* 60 */ {_SigNotify, "signal 60"},
+ /* 61 */ {_SigNotify, "signal 61"},
+ /* 62 */ {_SigNotify, "signal 62"},
+ /* 63 */ {_SigNotify, "signal 63"},
+ /* 64 */ {_SigNotify, "signal 64"},
+}
diff --git a/libgo/go/runtime/stack.go b/libgo/go/runtime/stack.go
new file mode 100644
index 0000000..8105996
--- /dev/null
+++ b/libgo/go/runtime/stack.go
@@ -0,0 +1,1068 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/atomic"
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+/*
+Stack layout parameters.
+Included by both the runtime (compiled via 6c) and the linkers (compiled via gcc).
+
+The per-goroutine g->stackguard is set to point StackGuard bytes
+above the bottom of the stack. Each function compares its stack
+pointer against g->stackguard to check for overflow. To cut one
+instruction from the check sequence for functions with tiny frames,
+the stack is allowed to protrude StackSmall bytes below the stack
+guard. Functions with large frames don't bother with the check and
+always call morestack. The sequences are (for amd64, others are
+similar):
+
+ guard = g->stackguard
+ frame = function's stack frame size
+ argsize = size of function arguments (call + return)
+
+ stack frame size <= StackSmall:
+ CMPQ guard, SP
+ JHI 3(PC)
+ MOVQ m->morearg, $(argsize << 32)
+ CALL morestack(SB)
+
+ stack frame size > StackSmall but < StackBig:
+ LEAQ (frame-StackSmall)(SP), R0
+ CMPQ guard, R0
+ JHI 3(PC)
+ MOVQ m->morearg, $(argsize << 32)
+ CALL morestack(SB)
+
+ stack frame size >= StackBig:
+ MOVQ m->morearg, $((argsize << 32) | frame)
+ CALL morestack(SB)
+
+The bottom StackGuard - StackSmall bytes are important: there has
+to be enough room to execute functions that refuse to check for
+stack overflow, either because they need to be adjacent to the
+actual caller's frame (deferproc) or because they handle the imminent
+stack overflow (morestack).
+
+For example, deferproc might call malloc, which does one of the
+above checks (without allocating a full frame), which might trigger
+a call to morestack. This sequence needs to fit in the bottom
+section of the stack. On amd64, morestack's frame is 40 bytes, and
+deferproc's frame is 56 bytes. That fits well within the
+StackGuard - StackSmall bytes at the bottom.
+The linkers explore all possible call traces involving non-splitting
+functions to make sure that this limit cannot be violated.
+*/
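
Restating the three check sequences from the comment above as Go (a sketch of
the policy only, not the generated code; the parameter names are illustrative):

    func needsMorestack(sp, guard, frame, stackSmall, stackBig uintptr) bool {
        switch {
        case frame <= stackSmall:
            return sp <= guard // one compare; may protrude StackSmall below the guard
        case frame < stackBig:
            return sp-(frame-stackSmall) <= guard // one extra address computation
        default:
            return true // huge frames skip the check and always call morestack
        }
    }
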
+
+const (
+ // StackSystem is a number of additional bytes to add
+ // to each stack below the usual guard area for OS-specific
+ // purposes like signal handling. Used on Windows, Plan 9,
+ // and Darwin/ARM because they do not use a separate stack.
+ _StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024
+
+ // The minimum size of stack used by Go code
+ _StackMin = 2048
+
+ // The minimum stack size to allocate.
+ // The hackery here rounds FixedStack0 up to a power of 2.
+ _FixedStack0 = _StackMin + _StackSystem
+ _FixedStack1 = _FixedStack0 - 1
+ _FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
+ _FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
+ _FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
+ _FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
+ _FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
+ _FixedStack = _FixedStack6 + 1
+
+ // Functions that need frames bigger than this use an extra
+ // instruction to do the stack split check, to avoid overflow
+ // in case SP - framesize wraps below zero.
+ // This value can be no bigger than the size of the unmapped
+ // space at zero.
+ _StackBig = 4096
+
+ // The stack guard is a pointer this many bytes above the
+ // bottom of the stack.
+ _StackGuard = 720*sys.StackGuardMultiplier + _StackSystem
+
+ // After a stack split check the SP is allowed to be this
+ // many bytes below the stack guard. This saves an instruction
+ // in the checking sequence for tiny frames.
+ _StackSmall = 128
+
+ // The maximum number of bytes that a chain of NOSPLIT
+ // functions can use.
+ _StackLimit = _StackGuard - _StackSystem - _StackSmall
+)
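
The _FixedStack chain above is a constant-time round-up-to-power-of-2:
subtract one, smear the highest set bit into every lower position, add one
back. The same trick as a function (sufficient, like the constants, for values
below 1<<32):

    func roundUpPow2(n uintptr) uintptr {
        n--
        n |= n >> 1
        n |= n >> 2
        n |= n >> 4
        n |= n >> 8
        n |= n >> 16
        return n + 1
    }

For example, with _StackSystem = 512 this rounds 2048+512 = 2560 up to 4096,
which is what _FixedStack evaluates to on such a platform.
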
+
+// Goroutine preemption request.
+// Stored into g->stackguard0 to cause split stack check failure.
+// Must be greater than any real sp.
+// 0xfffffade in hex.
+const (
+ _StackPreempt = uintptrMask & -1314
+ _StackFork = uintptrMask & -1234
+)
+
+const (
+ // stackDebug == 0: no logging
+ // == 1: logging of per-stack operations
+ // == 2: logging of per-frame operations
+ // == 3: logging of per-word updates
+ // == 4: logging of per-word reads
+ stackDebug = 0
+ stackFromSystem = 0 // allocate stacks from system memory instead of the heap
+ stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
+ stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
+
+ stackCache = 1
+)
+
+const (
+ uintptrMask = 1<<(8*sys.PtrSize) - 1
+ poisonStack = uintptrMask & 0x6868686868686868
+
+ // Goroutine preemption request.
+ // Stored into g->stackguard0 to cause split stack check failure.
+ // Must be greater than any real sp.
+ // 0xfffffade in hex.
+ stackPreempt = uintptrMask & -1314
+
+ // Thread is forking.
+ // Stored into g->stackguard0 to cause split stack check failure.
+ // Must be greater than any real sp.
+ stackFork = uintptrMask & -1234
+)
+
+// Global pool of spans that have free stacks.
+// Stacks are assigned an order according to size.
+// order = log_2(size/FixedStack)
+// There is a free list for each order.
+// TODO: one lock per order?
+var stackpool [_NumStackOrders]mSpanList
+var stackpoolmu mutex
+
+// Global pool of large stack spans.
+var stackLarge struct {
+ lock mutex
+ free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
+}
+
+// Cached value of haveexperiment("framepointer")
+var framepointer_enabled bool
+
+func stackinit() {
+ if _StackCacheSize&_PageMask != 0 {
+ throw("cache size must be a multiple of page size")
+ }
+ for i := range stackpool {
+ stackpool[i].init()
+ }
+ for i := range stackLarge.free {
+ stackLarge.free[i].init()
+ }
+}
+
+// stacklog2 returns ⌊log_2(n)⌋.
+func stacklog2(n uintptr) int {
+ log2 := 0
+ for n > 1 {
+ n >>= 1
+ log2++
+ }
+ return log2
+}
+
+// Allocates a stack from the free pool. Must be called with
+// stackpoolmu held.
+func stackpoolalloc(order uint8) gclinkptr {
+ list := &stackpool[order]
+ s := list.first
+ if s == nil {
+ // No free stacks. Allocate another span's worth.
+ s = mheap_.allocStack(_StackCacheSize >> _PageShift)
+ if s == nil {
+ throw("out of memory")
+ }
+ if s.ref != 0 {
+ throw("bad ref")
+ }
+ if s.freelist.ptr() != nil {
+ throw("bad freelist")
+ }
+ for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
+ x := gclinkptr(uintptr(s.start)<<_PageShift + i)
+ x.ptr().next = s.freelist
+ s.freelist = x
+ }
+ list.insert(s)
+ }
+ x := s.freelist
+ if x.ptr() == nil {
+ throw("span has no free stacks")
+ }
+ s.freelist = x.ptr().next
+ s.ref++
+ if s.freelist.ptr() == nil {
+ // all stacks in s are allocated.
+ list.remove(s)
+ }
+ return x
+}
+
+// Adds stack x to the free pool. Must be called with stackpoolmu held.
+func stackpoolfree(x gclinkptr, order uint8) {
+ s := mheap_.lookup(unsafe.Pointer(x))
+ if s.state != _MSpanStack {
+ throw("freeing stack not in a stack span")
+ }
+ if s.freelist.ptr() == nil {
+ // s will now have a free stack
+ stackpool[order].insert(s)
+ }
+ x.ptr().next = s.freelist
+ s.freelist = x
+ s.ref--
+ if gcphase == _GCoff && s.ref == 0 {
+ // Span is completely free. Return it to the heap
+ // immediately if we're sweeping.
+ //
+ // If GC is active, we delay the free until the end of
+ // GC to avoid the following type of situation:
+ //
+ // 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
+ // 2) The stack that pointer points to is copied
+ // 3) The old stack is freed
+ // 4) The containing span is marked free
+ // 5) GC attempts to mark the SudoG.elem pointer. The
+ // marking fails because the pointer looks like a
+ // pointer into a free span.
+ //
+ // By not freeing, we prevent step #4 until GC is done.
+ stackpool[order].remove(s)
+ s.freelist = 0
+ mheap_.freeStack(s)
+ }
+}
+
+// stackcacherefill/stackcacherelease implement a global pool of stack segments.
+// The pool is required to prevent unlimited growth of per-thread caches.
+func stackcacherefill(c *mcache, order uint8) {
+ if stackDebug >= 1 {
+ print("stackcacherefill order=", order, "\n")
+ }
+
+ // Grab some stacks from the global cache.
+ // Grab half of the allowed capacity (to prevent thrashing).
+ var list gclinkptr
+ var size uintptr
+ lock(&stackpoolmu)
+ for size < _StackCacheSize/2 {
+ x := stackpoolalloc(order)
+ x.ptr().next = list
+ list = x
+ size += _FixedStack << order
+ }
+ unlock(&stackpoolmu)
+ c.stackcache[order].list = list
+ c.stackcache[order].size = size
+}
+
+func stackcacherelease(c *mcache, order uint8) {
+ if stackDebug >= 1 {
+ print("stackcacherelease order=", order, "\n")
+ }
+ x := c.stackcache[order].list
+ size := c.stackcache[order].size
+ lock(&stackpoolmu)
+ for size > _StackCacheSize/2 {
+ y := x.ptr().next
+ stackpoolfree(x, order)
+ x = y
+ size -= _FixedStack << order
+ }
+ unlock(&stackpoolmu)
+ c.stackcache[order].list = x
+ c.stackcache[order].size = size
+}
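
Moving half of the allowed capacity in each direction is a hysteresis scheme:
an m that oscillates around either boundary batches its traffic to the shared
pool instead of taking stackpoolmu on every alloc/free. A generic sketch of
the same pattern (simplified to ints rather than linked stack segments;
capLimit is an arbitrary stand-in for _StackCacheSize):

    import "sync"

    const capLimit = 8

    type pool struct {
        mu     sync.Mutex
        shared []int
    }

    func (p *pool) refill(local *[]int) {
        p.mu.Lock()
        defer p.mu.Unlock()
        for len(*local) < capLimit/2 && len(p.shared) > 0 {
            n := len(p.shared) - 1
            *local = append(*local, p.shared[n])
            p.shared = p.shared[:n]
        }
    }

    func (p *pool) release(local *[]int) {
        p.mu.Lock()
        defer p.mu.Unlock()
        for len(*local) > capLimit/2 {
            n := len(*local) - 1
            p.shared = append(p.shared, (*local)[n])
            *local = (*local)[:n]
        }
    }
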
+
+func stackcache_clear(c *mcache) {
+ if stackDebug >= 1 {
+ print("stackcache clear\n")
+ }
+ lock(&stackpoolmu)
+ for order := uint8(0); order < _NumStackOrders; order++ {
+ x := c.stackcache[order].list
+ for x.ptr() != nil {
+ y := x.ptr().next
+ stackpoolfree(x, order)
+ x = y
+ }
+ c.stackcache[order].list = 0
+ c.stackcache[order].size = 0
+ }
+ unlock(&stackpoolmu)
+}
+
+func stackalloc(n uint32) (stack, []stkbar) {
+ // Stackalloc must be called on scheduler stack, so that we
+ // never try to grow the stack during the code that stackalloc runs.
+ // Doing so would cause a deadlock (issue 1547).
+ thisg := getg()
+ if thisg != thisg.m.g0 {
+ throw("stackalloc not on scheduler stack")
+ }
+ if n&(n-1) != 0 {
+ throw("stack size not a power of 2")
+ }
+ if stackDebug >= 1 {
+ print("stackalloc ", n, "\n")
+ }
+
+ // Compute the size of stack barrier array.
+ maxstkbar := gcMaxStackBarriers(int(n))
+ nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)
+
+ if debug.efence != 0 || stackFromSystem != 0 {
+ v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
+ if v == nil {
+ throw("out of memory (stackalloc)")
+ }
+ top := uintptr(n) - nstkbar
+ stkbarSlice := slice{add(v, top), 0, maxstkbar}
+ return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
+ }
+
+ // Small stacks are allocated with a fixed-size free-list allocator.
+ // If we need a stack of a bigger size, we fall back on allocating
+ // a dedicated span.
+ var v unsafe.Pointer
+ if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+ order := uint8(0)
+ n2 := n
+ for n2 > _FixedStack {
+ order++
+ n2 >>= 1
+ }
+ var x gclinkptr
+ c := thisg.m.mcache
+ if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
+ // c == nil can happen in the guts of exitsyscall or
+ // procresize. Just get a stack from the global pool.
+ // Also don't touch stackcache during gc
+ // as it's flushed concurrently.
+ lock(&stackpoolmu)
+ x = stackpoolalloc(order)
+ unlock(&stackpoolmu)
+ } else {
+ x = c.stackcache[order].list
+ if x.ptr() == nil {
+ stackcacherefill(c, order)
+ x = c.stackcache[order].list
+ }
+ c.stackcache[order].list = x.ptr().next
+ c.stackcache[order].size -= uintptr(n)
+ }
+ v = unsafe.Pointer(x)
+ } else {
+ var s *mspan
+ npage := uintptr(n) >> _PageShift
+ log2npage := stacklog2(npage)
+
+ // Try to get a stack from the large stack cache.
+ lock(&stackLarge.lock)
+ if !stackLarge.free[log2npage].isEmpty() {
+ s = stackLarge.free[log2npage].first
+ stackLarge.free[log2npage].remove(s)
+ }
+ unlock(&stackLarge.lock)
+
+ if s == nil {
+ // Allocate a new stack from the heap.
+ s = mheap_.allocStack(npage)
+ if s == nil {
+ throw("out of memory")
+ }
+ }
+ v = unsafe.Pointer(s.start << _PageShift)
+ }
+
+ if raceenabled {
+ racemalloc(v, uintptr(n))
+ }
+ if msanenabled {
+ msanmalloc(v, uintptr(n))
+ }
+ if stackDebug >= 1 {
+ print(" allocated ", v, "\n")
+ }
+ top := uintptr(n) - nstkbar
+ stkbarSlice := slice{add(v, top), 0, maxstkbar}
+ return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
+}
+
+func stackfree(stk stack, n uintptr) {
+ gp := getg()
+ v := unsafe.Pointer(stk.lo)
+ if n&(n-1) != 0 {
+ throw("stack not a power of 2")
+ }
+ if stk.lo+n < stk.hi {
+ throw("bad stack size")
+ }
+ if stackDebug >= 1 {
+ println("stackfree", v, n)
+ memclr(v, n) // for testing, clobber stack data
+ }
+ if debug.efence != 0 || stackFromSystem != 0 {
+ if debug.efence != 0 || stackFaultOnFree != 0 {
+ sysFault(v, n)
+ } else {
+ sysFree(v, n, &memstats.stacks_sys)
+ }
+ return
+ }
+ if msanenabled {
+ msanfree(v, n)
+ }
+ if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
+ order := uint8(0)
+ n2 := n
+ for n2 > _FixedStack {
+ order++
+ n2 >>= 1
+ }
+ x := gclinkptr(v)
+ c := gp.m.mcache
+ if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
+ lock(&stackpoolmu)
+ stackpoolfree(x, order)
+ unlock(&stackpoolmu)
+ } else {
+ if c.stackcache[order].size >= _StackCacheSize {
+ stackcacherelease(c, order)
+ }
+ x.ptr().next = c.stackcache[order].list
+ c.stackcache[order].list = x
+ c.stackcache[order].size += n
+ }
+ } else {
+ s := mheap_.lookup(v)
+ if s.state != _MSpanStack {
+ println(hex(s.start<<_PageShift), v)
+ throw("bad span state")
+ }
+ if gcphase == _GCoff {
+ // Free the stack immediately if we're
+ // sweeping.
+ mheap_.freeStack(s)
+ } else {
+ // If the GC is running, we can't return a
+ // stack span to the heap because it could be
+ // reused as a heap span, and this state
+ // change would race with GC. Add it to the
+ // large stack cache instead.
+ log2npage := stacklog2(s.npages)
+ lock(&stackLarge.lock)
+ stackLarge.free[log2npage].insert(s)
+ unlock(&stackLarge.lock)
+ }
+ }
+}
+
+var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
+
+var ptrnames = []string{
+ 0: "scalar",
+ 1: "ptr",
+}
+
+// Stack frame layout
+//
+// (x86)
+// +------------------+
+// | args from caller |
+// +------------------+ <- frame->argp
+// | return address |
+// +------------------+
+// | caller's BP (*) | (*) if framepointer_enabled && varp < sp
+// +------------------+ <- frame->varp
+// | locals |
+// +------------------+
+// | args to callee |
+// +------------------+ <- frame->sp
+//
+// (arm)
+// +------------------+
+// | args from caller |
+// +------------------+ <- frame->argp
+// | caller's retaddr |
+// +------------------+ <- frame->varp
+// | locals |
+// +------------------+
+// | args to callee |
+// +------------------+
+// | return address |
+// +------------------+ <- frame->sp
+
+type adjustinfo struct {
+ old stack
+ delta uintptr // ptr distance from old to new stack (newbase - oldbase)
+ cache pcvalueCache
+}
+
+// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
+// If so, it rewrites *vpp to point into the new stack.
+func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
+ pp := (*unsafe.Pointer)(vpp)
+ p := *pp
+ if stackDebug >= 4 {
+ print(" ", pp, ":", p, "\n")
+ }
+ if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
+ *pp = add(p, adjinfo.delta)
+ if stackDebug >= 3 {
+ print(" adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
+ }
+ }
+}
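
The adjustment rule is pure pointer arithmetic: anything that pointed into the
old stack moves by the same delta as the stack base did. In isolation:

    // relocate is a sketch of adjustpointer's core rule; delta is
    // newBase - oldBase (computed in uintptr arithmetic, so it may wrap).
    func relocate(p, oldLo, oldHi, delta uintptr) uintptr {
        if oldLo <= p && p < oldHi {
            return p + delta
        }
        return p // not an old-stack pointer; leave it alone
    }
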
+
+// Information from the compiler about the layout of stack frames.
+type bitvector struct {
+ n int32 // # of bits
+ bytedata *uint8
+}
+
+type gobitvector struct {
+ n uintptr
+ bytedata []uint8
+}
+
+func gobv(bv bitvector) gobitvector {
+ return gobitvector{
+ uintptr(bv.n),
+ (*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
+ }
+}
+
+func ptrbit(bv *gobitvector, i uintptr) uint8 {
+ return (bv.bytedata[i/8] >> (i % 8)) & 1
+}
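
ptrbit indexes the bitmap little-endian within each byte: bit i lives at bit
i%8 of byte i/8. A worked example with a single byte 0x05 (bits 0 and 2 set):

    // bv.bytedata = []byte{0x05}
    // ptrbit(&bv, 0) == 1 -> "ptr"
    // ptrbit(&bv, 1) == 0 -> "scalar"
    // ptrbit(&bv, 2) == 1 -> "ptr"
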
+
+// bv describes the memory starting at address scanp.
+// Adjust any pointers contained therein.
+func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
+ bv := gobv(*cbv)
+ minp := adjinfo.old.lo
+ maxp := adjinfo.old.hi
+ delta := adjinfo.delta
+ num := uintptr(bv.n)
+ for i := uintptr(0); i < num; i++ {
+ if stackDebug >= 4 {
+ print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
+ }
+ if ptrbit(&bv, i) == 1 {
+ pp := (*uintptr)(add(scanp, i*sys.PtrSize))
+ p := *pp
+ if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
+ // Looks like a junk value in a pointer slot.
+ // Live analysis wrong?
+ getg().m.traceback = 2
+ print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
+ throw("invalid stack pointer")
+ }
+ if minp <= p && p < maxp {
+ if stackDebug >= 3 {
+ print("adjust ptr ", p, " ", funcname(f), "\n")
+ }
+ *pp = p + delta
+ }
+ }
+ }
+}
+
+// Note: the argument/return area is adjusted by the callee.
+func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
+ adjinfo := (*adjustinfo)(arg)
+ targetpc := frame.continpc
+ if targetpc == 0 {
+ // Frame is dead.
+ return true
+ }
+ f := frame.fn
+ if stackDebug >= 2 {
+ print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
+ }
+ if f.entry == systemstack_switchPC {
+ // A special routine at the bottom of the stack of a
+ // goroutine that does a systemstack call.
+ // We allow it to be copied even though we don't
+ // have full GC info for it (because it is written in asm).
+ return true
+ }
+ if targetpc != f.entry {
+ targetpc--
+ }
+ pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
+ if pcdata == -1 {
+ pcdata = 0 // in prologue
+ }
+
+ // Adjust local variables if stack frame has been allocated.
+ size := frame.varp - frame.sp
+ var minsize uintptr
+ switch sys.TheChar {
+ case '7':
+ minsize = sys.SpAlign
+ default:
+ minsize = sys.MinFrameSize
+ }
+ if size > minsize {
+ var bv bitvector
+ stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
+ if stackmap == nil || stackmap.n <= 0 {
+ print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
+ throw("missing stackmap")
+ }
+ // Locals bitmap information, scan just the pointers in locals.
+ if pcdata < 0 || pcdata >= stackmap.n {
+ // don't know where we are
+ print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
+ throw("bad symbol table")
+ }
+ bv = stackmapdata(stackmap, pcdata)
+ size = uintptr(bv.n) * sys.PtrSize
+ if stackDebug >= 3 {
+ print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
+ }
+ adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
+ }
+
+ // Adjust saved base pointer if there is one.
+ if sys.TheChar == '6' && frame.argp-frame.varp == 2*sys.RegSize {
+ if !framepointer_enabled {
+ print("runtime: found space for saved base pointer, but no framepointer experiment\n")
+ print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
+ throw("bad frame layout")
+ }
+ if stackDebug >= 3 {
+ print(" saved bp\n")
+ }
+ adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
+ }
+
+ // Adjust arguments.
+ if frame.arglen > 0 {
+ var bv bitvector
+ if frame.argmap != nil {
+ bv = *frame.argmap
+ } else {
+ stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
+ if stackmap == nil || stackmap.n <= 0 {
+ print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
+ throw("missing stackmap")
+ }
+ if pcdata < 0 || pcdata >= stackmap.n {
+ // don't know where we are
+ print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
+ throw("bad symbol table")
+ }
+ bv = stackmapdata(stackmap, pcdata)
+ }
+ if stackDebug >= 3 {
+ print(" args\n")
+ }
+ adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
+ }
+ return true
+}
+
+func adjustctxt(gp *g, adjinfo *adjustinfo) {
+ adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
+}
+
+func adjustdefers(gp *g, adjinfo *adjustinfo) {
+ // Adjust defer argument blocks the same way we adjust active stack frames.
+ tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
+
+ // Adjust pointers in the Defer structs.
+ // Defer structs themselves are never on the stack.
+ for d := gp._defer; d != nil; d = d.link {
+ adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
+ adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
+ adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
+ }
+}
+
+func adjustpanics(gp *g, adjinfo *adjustinfo) {
+ // Panics are on stack and already adjusted.
+ // Update pointer to head of list in G.
+ adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
+}
+
+func adjustsudogs(gp *g, adjinfo *adjustinfo) {
+ // The data elements pointed to by a SudoG structure
+ // might be on the stack.
+ for s := gp.waiting; s != nil; s = s.waitlink {
+ adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
+ adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
+ }
+}
+
+func adjuststkbar(gp *g, adjinfo *adjustinfo) {
+ for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
+ adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
+ }
+}
+
+func fillstack(stk stack, b byte) {
+ for p := stk.lo; p < stk.hi; p++ {
+ *(*byte)(unsafe.Pointer(p)) = b
+ }
+}
+
+// Copies gp's stack to a new stack of a different size.
+// Caller must have changed gp status to Gcopystack.
+func copystack(gp *g, newsize uintptr) {
+ if gp.syscallsp != 0 {
+ throw("stack growth not allowed in system call")
+ }
+ old := gp.stack
+ if old.lo == 0 {
+ throw("nil stackbase")
+ }
+ used := old.hi - gp.sched.sp
+
+ // allocate new stack
+ new, newstkbar := stackalloc(uint32(newsize))
+ if stackPoisonCopy != 0 {
+ fillstack(new, 0xfd)
+ }
+ if stackDebug >= 1 {
+ print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
+ }
+
+ // Disallow sigprof scans of this stack and block if there's
+ // one in progress.
+ gcLockStackBarriers(gp)
+
+ // adjust pointers in the to-be-copied frames
+ var adjinfo adjustinfo
+ adjinfo.old = old
+ adjinfo.delta = new.hi - old.hi
+ gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
+
+ // adjust other miscellaneous things that have pointers into stacks.
+ adjustctxt(gp, &adjinfo)
+ adjustdefers(gp, &adjinfo)
+ adjustpanics(gp, &adjinfo)
+ adjustsudogs(gp, &adjinfo)
+ adjuststkbar(gp, &adjinfo)
+
+ // copy the stack to the new location
+ if stackPoisonCopy != 0 {
+ fillstack(new, 0xfb)
+ }
+ memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)
+
+ // copy old stack barriers to new stack barrier array
+ newstkbar = newstkbar[:len(gp.stkbar)]
+ copy(newstkbar, gp.stkbar)
+
+ // Swap out old stack for new one
+ gp.stack = new
+ gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
+ gp.sched.sp = new.hi - used
+ oldsize := gp.stackAlloc
+ gp.stackAlloc = newsize
+ gp.stkbar = newstkbar
+ gp.stktopsp += adjinfo.delta
+
+ gcUnlockStackBarriers(gp)
+
+ // free old stack
+ if stackPoisonCopy != 0 {
+ fillstack(old, 0xfc)
+ }
+ stackfree(old, oldsize)
+}
+
+// round x up to a power of 2.
+func round2(x int32) int32 {
+ s := uint(0)
+ for 1<<s < x {
+ s++
+ }
+ return 1 << s
+}
+
+// Called from runtime·morestack when more stack is needed.
+// Allocate larger stack and relocate to new stack.
+// Stack growth is multiplicative, for constant amortized cost.
+//
+// g->atomicstatus will be Grunning or Gscanrunning upon entry.
+// If the GC is trying to stop this g then it will set preemptscan to true.
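
Doubling is what makes the amortized cost constant: a stack that ends up at N
bytes was copied at sizes N/2, N/4, ..., so the total bytes copied over all
growths is at most N/2 + N/4 + ... < N, i.e. O(1) amortized work per byte of
final stack. Growing by a fixed increment instead would make the total copying
quadratic in N.
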
+func newstack() {
+ thisg := getg()
+ // TODO: double check all gp. shouldn't be getg().
+ if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
+ throw("stack growth after fork")
+ }
+ if thisg.m.morebuf.g.ptr() != thisg.m.curg {
+ print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
+ morebuf := thisg.m.morebuf
+ traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
+ throw("runtime: wrong goroutine in newstack")
+ }
+ if thisg.m.curg.throwsplit {
+ gp := thisg.m.curg
+ // Update syscallsp, syscallpc in case traceback uses them.
+ morebuf := thisg.m.morebuf
+ gp.syscallsp = morebuf.sp
+ gp.syscallpc = morebuf.pc
+ print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
+ "\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
+ "\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
+
+ traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
+ throw("runtime: stack split at bad time")
+ }
+
+ gp := thisg.m.curg
+ morebuf := thisg.m.morebuf
+ thisg.m.morebuf.pc = 0
+ thisg.m.morebuf.lr = 0
+ thisg.m.morebuf.sp = 0
+ thisg.m.morebuf.g = 0
+ rewindmorestack(&gp.sched)
+
+ // NOTE: stackguard0 may change underfoot, if another thread
+ // is about to try to preempt gp. Read it just once and use that same
+ // value now and below.
+ preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt
+
+ // Be conservative about where we preempt.
+ // We are interested in preempting user Go code, not runtime code.
+ // If we're holding locks, mallocing, or preemption is disabled, don't
+ // preempt.
+ // This check is very early in newstack so that even the status change
+ // from Grunning to Gwaiting and back doesn't happen in this case.
+ // That status change by itself can be viewed as a small preemption,
+ // because the GC might change Gwaiting to Gscanwaiting, and then
+ // this goroutine has to wait for the GC to finish before continuing.
+ // If the GC is in some way dependent on this goroutine (for example,
+ // it needs a lock held by the goroutine), that small preemption turns
+ // into a real deadlock.
+ if preempt {
+ if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
+ // Let the goroutine keep running for now.
+ // gp->preempt is set, so it will be preempted next time.
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ gogo(&gp.sched) // never return
+ }
+ }
+
+ // The goroutine must be executing in order to call newstack,
+ // so it must be Grunning (or Gscanrunning).
+ casgstatus(gp, _Grunning, _Gwaiting)
+ gp.waitreason = "stack growth"
+
+ if gp.stack.lo == 0 {
+ throw("missing stack in newstack")
+ }
+ sp := gp.sched.sp
+ if sys.TheChar == '6' || sys.TheChar == '8' {
+ // The call to morestack costs a word.
+ sp -= sys.PtrSize
+ }
+ if stackDebug >= 1 || sp < gp.stack.lo {
+ print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
+ "\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
+ "\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
+ }
+ if sp < gp.stack.lo {
+ print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
+ print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
+ throw("runtime: split stack overflow")
+ }
+
+ if gp.sched.ctxt != nil {
+ // morestack wrote sched.ctxt on its way in here,
+ // without a write barrier. Run the write barrier now.
+ // It is not possible to be preempted between then
+ // and now, so it's okay.
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
+ }
+
+ if preempt {
+ if gp == thisg.m.g0 {
+ throw("runtime: preempt g0")
+ }
+ if thisg.m.p == 0 && thisg.m.locks == 0 {
+ throw("runtime: g is running but p is not")
+ }
+ if gp.preemptscan {
+ for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
+ // Likely to be racing with the GC as
+ // it sees a _Gwaiting and does the
+ // stack scan. If so, gcworkdone will
+ // be set and gcphasework will simply
+ // return.
+ }
+ if !gp.gcscandone {
+ scanstack(gp)
+ gp.gcscandone = true
+ }
+ gp.preemptscan = false
+ gp.preempt = false
+ casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
+ casgstatus(gp, _Gwaiting, _Grunning)
+ gp.stackguard0 = gp.stack.lo + _StackGuard
+ gogo(&gp.sched) // never return
+ }
+
+ // Act like goroutine called runtime.Gosched.
+ casgstatus(gp, _Gwaiting, _Grunning)
+ gopreempt_m(gp) // never return
+ }
+
+ // Allocate a bigger segment and move the stack.
+ oldsize := int(gp.stackAlloc)
+ newsize := oldsize * 2
+ if uintptr(newsize) > maxstacksize {
+ print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
+ throw("stack overflow")
+ }
+
+ casgstatus(gp, _Gwaiting, _Gcopystack)
+
+ // The concurrent GC will not scan the stack while we are doing the copy since
+ // the gp is in a Gcopystack status.
+ copystack(gp, uintptr(newsize))
+ if stackDebug >= 1 {
+ print("stack grow done\n")
+ }
+ casgstatus(gp, _Gcopystack, _Grunning)
+ gogo(&gp.sched)
+}
+
+//go:nosplit
+func nilfunc() {
+ *(*uint8)(nil) = 0
+}
+
+// adjust Gobuf as if it executed a call to fn
+// and then did an immediate gosave.
+func gostartcallfn(gobuf *gobuf, fv *funcval) {
+ var fn unsafe.Pointer
+ if fv != nil {
+ fn = unsafe.Pointer(fv.fn)
+ } else {
+ fn = unsafe.Pointer(funcPC(nilfunc))
+ }
+ gostartcall(gobuf, fn, unsafe.Pointer(fv))
+}
+
+// Maybe shrink the stack being used by gp.
+// Called at garbage collection time.
+func shrinkstack(gp *g) {
+ if readgstatus(gp) == _Gdead {
+ if gp.stack.lo != 0 {
+ // Free whole stack - it will get reallocated
+ // if G is used again.
+ stackfree(gp.stack, gp.stackAlloc)
+ gp.stack.lo = 0
+ gp.stack.hi = 0
+ gp.stkbar = nil
+ gp.stkbarPos = 0
+ }
+ return
+ }
+ if gp.stack.lo == 0 {
+ throw("missing stack in shrinkstack")
+ }
+
+ if debug.gcshrinkstackoff > 0 {
+ return
+ }
+
+ oldsize := gp.stackAlloc
+ newsize := oldsize / 2
+ // Don't shrink below the minimum stack allocation.
+ if newsize < _FixedStack {
+ return
+ }
+ // Compute how much of the stack is currently in use and only
+ // shrink the stack if gp is using less than a quarter of its
+ // current stack. The currently used stack includes everything
+ // down to the SP plus the stack guard space that ensures
+ // there's room for nosplit functions.
+ avail := gp.stack.hi - gp.stack.lo
+ if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
+ return
+ }
+
+ // We can't copy the stack if we're in a syscall.
+ // The syscall might have pointers into the stack.
+ if gp.syscallsp != 0 {
+ return
+ }
+ if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
+ return
+ }
+
+ if stackDebug > 0 {
+ print("shrinking stack ", oldsize, "->", newsize, "\n")
+ }
+
+ oldstatus := casgcopystack(gp)
+ copystack(gp, newsize)
+ casgstatus(gp, _Gcopystack, oldstatus)
+}
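
A worked instance of the quarter heuristic: with _StackSystem = 0 and
sys.StackGuardMultiplier = 1, _StackLimit = 720 - 128 = 592. For an 8192-byte
stack, avail/4 = 2048, so the shrink to 4096 happens only while
hi - sp + 592 < 2048, i.e. while the goroutine has less than 1456 bytes of
live frames; the _StackLimit pad keeps room for nosplit chains after the copy.
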
+
+// freeStackSpans frees unused stack spans at the end of GC.
+func freeStackSpans() {
+ lock(&stackpoolmu)
+
+ // Scan stack pools for empty stack spans.
+ for order := range stackpool {
+ list := &stackpool[order]
+ for s := list.first; s != nil; {
+ next := s.next
+ if s.ref == 0 {
+ list.remove(s)
+ s.freelist = 0
+ mheap_.freeStack(s)
+ }
+ s = next
+ }
+ }
+
+ unlock(&stackpoolmu)
+
+ // Free large stack spans.
+ lock(&stackLarge.lock)
+ for i := range stackLarge.free {
+ for s := stackLarge.free[i].first; s != nil; {
+ next := s.next
+ stackLarge.free[i].remove(s)
+ mheap_.freeStack(s)
+ s = next
+ }
+ }
+ unlock(&stackLarge.lock)
+}
+
+//go:nosplit
+func morestackc() {
+ systemstack(func() {
+ throw("attempt to execute C code on Go stack")
+ })
+}
diff --git a/libgo/go/runtime/string_test.go b/libgo/go/runtime/string_test.go
index 71bd830..e0967b3 100644
--- a/libgo/go/runtime/string_test.go
+++ b/libgo/go/runtime/string_test.go
@@ -126,7 +126,7 @@ func TestStringW(t *testing.T) {
*/
func TestLargeStringConcat(t *testing.T) {
- output := executeTest(t, largeStringConcatSource, nil)
+ output := runTestProg(t, "testprog", "stringconcat")
want := "panic: " + strings.Repeat("0", 1<<10) + strings.Repeat("1", 1<<10) +
strings.Repeat("2", 1<<10) + strings.Repeat("3", 1<<10)
if !strings.HasPrefix(output, want) {
@@ -134,19 +134,6 @@ func TestLargeStringConcat(t *testing.T) {
}
}
-var largeStringConcatSource = `
-package main
-import "strings"
-func main() {
- s0 := strings.Repeat("0", 1<<10)
- s1 := strings.Repeat("1", 1<<10)
- s2 := strings.Repeat("2", 1<<10)
- s3 := strings.Repeat("3", 1<<10)
- s := s0 + s1 + s2 + s3
- panic(s)
-}
-`
-
/*
func TestGostringnocopy(t *testing.T) {
max := *runtime.Maxstring
diff --git a/libgo/go/runtime/sys_mips64x.go b/libgo/go/runtime/sys_mips64x.go
new file mode 100644
index 0000000..9e7d805
--- /dev/null
+++ b/libgo/go/runtime/sys_mips64x.go
@@ -0,0 +1,43 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips64 mips64le
+
+package runtime
+
+import "unsafe"
+
+// adjust Gobuf as if it executed a call to fn with context ctxt
+// and then did an immediate Gosave.
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+ if buf.lr != 0 {
+ throw("invalid use of gostartcall")
+ }
+ buf.lr = buf.pc
+ buf.pc = uintptr(fn)
+ buf.ctxt = ctxt
+}
+
+// Called to rewind context saved during morestack back to beginning of function.
+// To help us, the linker emits a jmp back to the beginning right after the
+// call to morestack. We just have to decode and apply that jump.
+func rewindmorestack(buf *gobuf) {
+ var inst uint32
+ if buf.pc&3 == 0 && buf.pc != 0 {
+ inst = *(*uint32)(unsafe.Pointer(buf.pc))
+ if inst>>26 == 2 { // JMP addr
+ //print("runtime: rewind pc=", hex(buf.pc), " to pc=", hex(buf.pc &^ uintptr(1<<28-1) | uintptr((inst&^0xfc000000)<<2)), "\n");
+ buf.pc &^= 1<<28 - 1
+ buf.pc |= uintptr((inst &^ 0xfc000000) << 2)
+ return
+ }
+ if inst>>16 == 0x1000 { // BEQ R0, R0, offset
+ //print("runtime: rewind pc=", hex(buf.pc), " to pc=", hex(buf.pc + uintptr(int32(int16(inst&0xffff))<<2 + 4)), "\n");
+ buf.pc += uintptr(int32(int16(inst&0xffff))<<2 + 4)
+ return
+ }
+ }
+ print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
+ throw("runtime: misuse of rewindmorestack")
+}
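
A worked decode of the J case above: MIPS J keeps every PC bit above the low
28 (the current 256 MB region) and replaces the low 28 bits with a 26-bit word
index shifted left by 2. For a hypothetical

    pc   = 0x120003a08
    inst = 0x08000e80 // opcode 2 (J), word index 0xe80

the rewind computes

    target = pc&^(1<<28-1) | uintptr((inst&^0xfc000000)<<2)
           = 0x120000000 | 0x3a00
           = 0x120003a00
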
diff --git a/libgo/go/runtime/sys_nonppc64x.go b/libgo/go/runtime/sys_nonppc64x.go
new file mode 100644
index 0000000..4409374
--- /dev/null
+++ b/libgo/go/runtime/sys_nonppc64x.go
@@ -0,0 +1,10 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !ppc64,!ppc64le
+
+package runtime
+
+func prepGoExitFrame(sp uintptr) {
+}
diff --git a/libgo/go/runtime/testdata/testprog/crash.go b/libgo/go/runtime/testdata/testprog/crash.go
new file mode 100644
index 0000000..3d7c7c6
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/crash.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func init() {
+ register("Crash", Crash)
+}
+
+func test(name string) {
+ defer func() {
+ if x := recover(); x != nil {
+ fmt.Printf(" recovered")
+ }
+ fmt.Printf(" done\n")
+ }()
+ fmt.Printf("%s:", name)
+ var s *string
+ _ = *s
+ fmt.Print("SHOULD NOT BE HERE")
+}
+
+func testInNewThread(name string) {
+ c := make(chan bool)
+ go func() {
+ runtime.LockOSThread()
+ test(name)
+ c <- true
+ }()
+ <-c
+}
+
+func Crash() {
+ runtime.LockOSThread()
+ test("main")
+ testInNewThread("new-thread")
+ testInNewThread("second-new-thread")
+ test("main-again")
+}
diff --git a/libgo/go/runtime/testdata/testprog/deadlock.go b/libgo/go/runtime/testdata/testprog/deadlock.go
new file mode 100644
index 0000000..7f0a0cd
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/deadlock.go
@@ -0,0 +1,173 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+func init() {
+ registerInit("InitDeadlock", InitDeadlock)
+ registerInit("NoHelperGoroutines", NoHelperGoroutines)
+
+ register("SimpleDeadlock", SimpleDeadlock)
+ register("LockedDeadlock", LockedDeadlock)
+ register("LockedDeadlock2", LockedDeadlock2)
+ register("GoexitDeadlock", GoexitDeadlock)
+ register("StackOverflow", StackOverflow)
+ register("ThreadExhaustion", ThreadExhaustion)
+ register("RecursivePanic", RecursivePanic)
+ register("GoexitExit", GoexitExit)
+ register("GoNil", GoNil)
+ register("MainGoroutineID", MainGoroutineID)
+ register("Breakpoint", Breakpoint)
+ register("GoexitInPanic", GoexitInPanic)
+ register("PanicAfterGoexit", PanicAfterGoexit)
+ register("RecoveredPanicAfterGoexit", RecoveredPanicAfterGoexit)
+}
+
+func SimpleDeadlock() {
+ select {}
+ panic("not reached")
+}
+
+func InitDeadlock() {
+ select {}
+ panic("not reached")
+}
+
+func LockedDeadlock() {
+ runtime.LockOSThread()
+ select {}
+}
+
+func LockedDeadlock2() {
+ go func() {
+ runtime.LockOSThread()
+ select {}
+ }()
+ time.Sleep(time.Millisecond)
+ select {}
+}
+
+func GoexitDeadlock() {
+ F := func() {
+ for i := 0; i < 10; i++ {
+ }
+ }
+
+ go F()
+ go F()
+ runtime.Goexit()
+}
+
+func StackOverflow() {
+ var f func() byte
+ f = func() byte {
+ var buf [64 << 10]byte
+ return buf[0] + f()
+ }
+ debug.SetMaxStack(1474560)
+ f()
+}
+
+func ThreadExhaustion() {
+ debug.SetMaxThreads(10)
+ c := make(chan int)
+ for i := 0; i < 100; i++ {
+ go func() {
+ runtime.LockOSThread()
+ c <- 0
+ select {}
+ }()
+ <-c
+ }
+}
+
+func RecursivePanic() {
+ func() {
+ defer func() {
+ fmt.Println(recover())
+ }()
+ var x [8192]byte
+ func(x [8192]byte) {
+ defer func() {
+ if err := recover(); err != nil {
+ panic("wrap: " + err.(string))
+ }
+ }()
+ panic("bad")
+ }(x)
+ }()
+ panic("again")
+}
+
+func GoexitExit() {
+ go func() {
+ time.Sleep(time.Millisecond)
+ }()
+ i := 0
+ runtime.SetFinalizer(&i, func(p *int) {})
+ runtime.GC()
+ runtime.Goexit()
+}
+
+func GoNil() {
+ defer func() {
+ recover()
+ }()
+ var f func()
+ go f()
+ select {}
+}
+
+func MainGoroutineID() {
+ panic("test")
+}
+
+func NoHelperGoroutines() {
+ i := 0
+ runtime.SetFinalizer(&i, func(p *int) {})
+ time.AfterFunc(time.Hour, func() {})
+ panic("oops")
+}
+
+func Breakpoint() {
+ runtime.Breakpoint()
+}
+
+func GoexitInPanic() {
+ go func() {
+ defer func() {
+ runtime.Goexit()
+ }()
+ panic("hello")
+ }()
+ runtime.Goexit()
+}
+
+func PanicAfterGoexit() {
+ defer func() {
+ panic("hello")
+ }()
+ runtime.Goexit()
+}
+
+func RecoveredPanicAfterGoexit() {
+ defer func() {
+ defer func() {
+ r := recover()
+ if r == nil {
+ panic("bad recover")
+ }
+ }()
+ panic("hello")
+ }()
+ runtime.Goexit()
+}
diff --git a/libgo/go/runtime/testdata/testprog/gc.go b/libgo/go/runtime/testdata/testprog/gc.go
new file mode 100644
index 0000000..9bb367c
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/gc.go
@@ -0,0 +1,74 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+)
+
+func init() {
+ register("GCFairness", GCFairness)
+ register("GCSys", GCSys)
+}
+
+func GCSys() {
+ runtime.GOMAXPROCS(1)
+ memstats := new(runtime.MemStats)
+ runtime.GC()
+ runtime.ReadMemStats(memstats)
+ sys := memstats.Sys
+
+ runtime.MemProfileRate = 0 // disable profiler
+
+ itercount := 100000
+ for i := 0; i < itercount; i++ {
+ workthegc()
+ }
+
+ // Should only be using a few MB.
+ // We allocated roughly 100 MB (100000 iterations of ~1 KB each).
+ runtime.ReadMemStats(memstats)
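+ // Compute how much Sys grew since the baseline, clamping at
+ // zero in case it shrank.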
+ if sys > memstats.Sys {
+ sys = 0
+ } else {
+ sys = memstats.Sys - sys
+ }
+ if sys > 16<<20 {
+ fmt.Printf("using too much memory: %d bytes\n", sys)
+ return
+ }
+ fmt.Printf("OK\n")
+}
+
+func workthegc() []byte {
+ return make([]byte, 1029)
+}
+
+func GCFairness() {
+ runtime.GOMAXPROCS(1)
+ f, err := os.Open("/dev/null")
+ if os.IsNotExist(err) {
+ // This test is meaningful only if writes are fast.
+ // If there is no /dev/null, simply skip the test.
+ fmt.Println("OK")
+ return
+ }
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ for i := 0; i < 2; i++ {
+ go func() {
+ for {
+ f.Write([]byte("."))
+ }
+ }()
+ }
+ time.Sleep(10 * time.Millisecond)
+ fmt.Println("OK")
+}
diff --git a/libgo/go/runtime/testdata/testprog/main.go b/libgo/go/runtime/testdata/testprog/main.go
new file mode 100644
index 0000000..9c227bb
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/main.go
@@ -0,0 +1,35 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+var cmds = map[string]func(){}
+
+func register(name string, f func()) {
+ if cmds[name] != nil {
+ panic("duplicate registration: " + name)
+ }
+ cmds[name] = f
+}
+
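+// registerInit runs f immediately, during package initialization, when
+// the test named on the command line matches; this lets a test exercise
+// behavior that happens before main starts (e.g. InitDeadlock).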
+func registerInit(name string, f func()) {
+ if len(os.Args) >= 2 && os.Args[1] == name {
+ f()
+ }
+}
+
+func main() {
+ if len(os.Args) < 2 {
+ println("usage: " + os.Args[0] + " name-of-test")
+ return
+ }
+ f := cmds[os.Args[1]]
+ if f == nil {
+ println("unknown function: " + os.Args[1])
+ return
+ }
+ f()
+}
diff --git a/libgo/go/runtime/testdata/testprog/misc.go b/libgo/go/runtime/testdata/testprog/misc.go
new file mode 100644
index 0000000..237680f
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/misc.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "runtime"
+
+func init() {
+ register("NumGoroutine", NumGoroutine)
+}
+
+func NumGoroutine() {
+ println(runtime.NumGoroutine())
+}
diff --git a/libgo/go/runtime/testdata/testprog/signal.go b/libgo/go/runtime/testdata/testprog/signal.go
new file mode 100644
index 0000000..ac2d3e8
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/signal.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows,!plan9,!nacl
+
+package main
+
+import "syscall"
+
+func init() {
+ register("SignalExitStatus", SignalExitStatus)
+}
+
+func SignalExitStatus() {
+ syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
+}
diff --git a/libgo/go/runtime/testdata/testprog/stringconcat.go b/libgo/go/runtime/testdata/testprog/stringconcat.go
new file mode 100644
index 0000000..9dddf19
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/stringconcat.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "strings"
+
+func init() {
+ register("stringconcat", stringconcat)
+}
+
+func stringconcat() {
+ s0 := strings.Repeat("0", 1<<10)
+ s1 := strings.Repeat("1", 1<<10)
+ s2 := strings.Repeat("2", 1<<10)
+ s3 := strings.Repeat("3", 1<<10)
+ s := s0 + s1 + s2 + s3
+ panic(s)
+}
diff --git a/libgo/go/runtime/testdata/testprog/syscall_windows.go b/libgo/go/runtime/testdata/testprog/syscall_windows.go
new file mode 100644
index 0000000..73165be
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprog/syscall_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "syscall"
+
+func init() {
+ register("RaiseException", RaiseException)
+ register("ZeroDivisionException", ZeroDivisionException)
+}
+
+func RaiseException() {
+ const EXCEPTION_NONCONTINUABLE = 1
+ mod := syscall.MustLoadDLL("kernel32.dll")
+ proc := mod.MustFindProc("RaiseException")
+ proc.Call(0xbad, EXCEPTION_NONCONTINUABLE, 0, 0)
+ println("RaiseException should not return")
+}
+
+func ZeroDivisionException() {
+ x := 1
+ y := 0
+ z := x / y
+ println(z)
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/callback.go b/libgo/go/runtime/testdata/testprogcgo/callback.go
new file mode 100644
index 0000000..10e248a
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/callback.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+package main
+
+/*
+#include <pthread.h>
+
+void go_callback();
+
+static void *thr(void *arg) {
+ go_callback();
+ return 0;
+}
+
+static void foo() {
+ pthread_t th;
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setstacksize(&attr, 256 << 10);
+ pthread_create(&th, &attr, thr, 0);
+ pthread_join(th, 0);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func init() {
+ register("CgoCallbackGC", CgoCallbackGC)
+}
+
+//export go_callback
+func go_callback() {
+ runtime.GC()
+ grow()
+ runtime.GC()
+}
+
+var cnt int
+
+func grow() {
+ x := 10000
+ sum := 0
+ if grow1(&x, &sum) == 0 {
+ panic("bad")
+ }
+}
+
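+// grow1 recurses until *x reaches zero, passing pointers down so that
+// every stack frame holds live pointers for the collector to scan.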
+func grow1(x, sum *int) int {
+ if *x == 0 {
+ return *sum + 1
+ }
+ *x--
+ sum1 := *sum + *x
+ return grow1(x, &sum1)
+}
+
+func CgoCallbackGC() {
+ const P = 100
+ done := make(chan bool)
+ // allocate a bunch of stack frames and spray them with pointers
+ for i := 0; i < P; i++ {
+ go func() {
+ grow()
+ done <- true
+ }()
+ }
+ for i := 0; i < P; i++ {
+ <-done
+ }
+ // now give these stack frames to cgo callbacks
+ for i := 0; i < P; i++ {
+ go func() {
+ C.foo()
+ done <- true
+ }()
+ }
+ for i := 0; i < P; i++ {
+ <-done
+ }
+ fmt.Printf("OK\n")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/cgo.go b/libgo/go/runtime/testdata/testprogcgo/cgo.go
new file mode 100644
index 0000000..cf1af82
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/cgo.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+void foo1(void) {}
+*/
+import "C"
+import (
+ "fmt"
+ "runtime"
+ "time"
+)
+
+func init() {
+ register("CgoSignalDeadlock", CgoSignalDeadlock)
+ register("CgoTraceback", CgoTraceback)
+}
+
+func CgoSignalDeadlock() {
+ runtime.GOMAXPROCS(100)
+ ping := make(chan bool)
+ go func() {
+ for i := 0; ; i++ {
+ runtime.Gosched()
+ select {
+ case done := <-ping:
+ if done {
+ ping <- true
+ return
+ }
+ ping <- true
+ default:
+ }
+ func() {
+ defer func() {
+ recover()
+ }()
+ var s *string
+ *s = ""
+ }()
+ }
+ }()
+ time.Sleep(time.Millisecond)
+ for i := 0; i < 64; i++ {
+ go func() {
+ runtime.LockOSThread()
+ select {}
+ }()
+ go func() {
+ runtime.LockOSThread()
+ select {}
+ }()
+ time.Sleep(time.Millisecond)
+ ping <- false
+ select {
+ case <-ping:
+ case <-time.After(time.Second):
+ fmt.Printf("HANG\n")
+ return
+ }
+ }
+ ping <- true
+ select {
+ case <-ping:
+ case <-time.After(time.Second):
+ fmt.Printf("HANG\n")
+ return
+ }
+ fmt.Printf("OK\n")
+}
+
+func CgoTraceback() {
+ C.foo1()
+ buf := make([]byte, 1)
+ runtime.Stack(buf, true)
+ fmt.Printf("OK\n")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/crash.go b/libgo/go/runtime/testdata/testprogcgo/crash.go
new file mode 100644
index 0000000..3d7c7c6
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/crash.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func init() {
+ register("Crash", Crash)
+}
+
+func test(name string) {
+ defer func() {
+ if x := recover(); x != nil {
+ fmt.Printf(" recovered")
+ }
+ fmt.Printf(" done\n")
+ }()
+ fmt.Printf("%s:", name)
+ var s *string
+ _ = *s
+ fmt.Print("SHOULD NOT BE HERE")
+}
+
+func testInNewThread(name string) {
+ c := make(chan bool)
+ go func() {
+ runtime.LockOSThread()
+ test(name)
+ c <- true
+ }()
+ <-c
+}
+
+func Crash() {
+ runtime.LockOSThread()
+ test("main")
+ testInNewThread("new-thread")
+ testInNewThread("second-new-thread")
+ test("main-again")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/dll_windows.go b/libgo/go/runtime/testdata/testprogcgo/dll_windows.go
new file mode 100644
index 0000000..a0647ef
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/dll_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+/*
+#include <windows.h>
+
+DWORD getthread() {
+ return GetCurrentThreadId();
+}
+*/
+import "C"
+import "./windows"
+
+func init() {
+ register("CgoDLLImportsMain", CgoDLLImportsMain)
+}
+
+func CgoDLLImportsMain() {
+ C.getthread()
+ windows.GetThread()
+ println("OK")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/dropm.go b/libgo/go/runtime/testdata/testprogcgo/dropm.go
new file mode 100644
index 0000000..75984ea
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/dropm.go
@@ -0,0 +1,59 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+// Test that a sequence of callbacks from C to Go gets the same m.
+// This failed to be true on arm and arm64, which was the root cause
+// of issue 13881.
+
+package main
+
+/*
+#include <stddef.h>
+#include <pthread.h>
+
+extern void GoCheckM();
+
+static void* thread(void* arg __attribute__ ((unused))) {
+ GoCheckM();
+ return NULL;
+}
+
+static void CheckM() {
+ pthread_t tid;
+ pthread_create(&tid, NULL, thread, NULL);
+ pthread_join(tid, NULL);
+ pthread_create(&tid, NULL, thread, NULL);
+ pthread_join(tid, NULL);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "os"
+)
+
+func init() {
+ register("EnsureDropM", EnsureDropM)
+}
+
+var savedM uintptr
+
+//export GoCheckM
+func GoCheckM() {
+ m := runtime_getm_for_test()
+ if savedM == 0 {
+ savedM = m
+ } else if savedM != m {
+ fmt.Printf("m == %x want %x\n", m, savedM)
+ os.Exit(1)
+ }
+}
+
+func EnsureDropM() {
+ C.CheckM()
+ fmt.Println("OK")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/dropm_stub.go b/libgo/go/runtime/testdata/testprogcgo/dropm_stub.go
new file mode 100644
index 0000000..4c3f46a
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/dropm_stub.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import _ "unsafe" // for go:linkname
+
+// Defined in the runtime package.
+//go:linkname runtime_getm_for_test runtime.getm
+func runtime_getm_for_test() uintptr
diff --git a/libgo/go/runtime/testdata/testprogcgo/exec.go b/libgo/go/runtime/testdata/testprogcgo/exec.go
new file mode 100644
index 0000000..8dc1d51
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/exec.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+package main
+
+/*
+#include <stddef.h>
+#include <signal.h>
+#include <pthread.h>
+
+// Save the signal mask at startup so that we see what it is before
+// the Go runtime starts setting up signals.
+
+static sigset_t mask;
+
+static void init(void) __attribute__ ((constructor));
+
+static void init() {
+ sigemptyset(&mask);
+ pthread_sigmask(SIG_SETMASK, NULL, &mask);
+}
+
+int SIGINTBlocked() {
+ return sigismember(&mask, SIGINT);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "os/signal"
+ "sync"
+ "syscall"
+)
+
+func init() {
+ register("CgoExecSignalMask", CgoExecSignalMask)
+}
+
+func CgoExecSignalMask() {
+ if len(os.Args) > 2 && os.Args[2] == "testsigint" {
+ if C.SIGINTBlocked() != 0 {
+ os.Exit(1)
+ }
+ os.Exit(0)
+ }
+
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, syscall.SIGTERM)
+ go func() {
+ for range c {
+ }
+ }()
+
+ const goCount = 10
+ const execCount = 10
+ var wg sync.WaitGroup
+ wg.Add(goCount*execCount + goCount)
+ for i := 0; i < goCount; i++ {
+ go func() {
+ defer wg.Done()
+ for j := 0; j < execCount; j++ {
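+ // Toggle signal registration and send ourselves SIGTERM
+ // while child processes are being started, to exercise
+ // signal handling around fork/exec.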
+ c2 := make(chan os.Signal, 1)
+ signal.Notify(c2, syscall.SIGUSR1)
+ syscall.Kill(os.Getpid(), syscall.SIGTERM)
+ go func(j int) {
+ defer wg.Done()
+ cmd := exec.Command(os.Args[0], "CgoExecSignalMask", "testsigint")
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Run(); err != nil {
+ fmt.Printf("iteration %d: %v\n", j, err)
+ os.Exit(1)
+ }
+ }(j)
+ signal.Stop(c2)
+ }
+ }()
+ }
+ wg.Wait()
+
+ fmt.Println("OK")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/main.go b/libgo/go/runtime/testdata/testprogcgo/main.go
new file mode 100644
index 0000000..9c227bb
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/main.go
@@ -0,0 +1,35 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+var cmds = map[string]func(){}
+
+func register(name string, f func()) {
+ if cmds[name] != nil {
+ panic("duplicate registration: " + name)
+ }
+ cmds[name] = f
+}
+
+func registerInit(name string, f func()) {
+ if len(os.Args) >= 2 && os.Args[1] == name {
+ f()
+ }
+}
+
+func main() {
+ if len(os.Args) < 2 {
+ println("usage: " + os.Args[0] + " name-of-test")
+ return
+ }
+ f := cmds[os.Args[1]]
+ if f == nil {
+ println("unknown function: " + os.Args[1])
+ return
+ }
+ f()
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/threadpanic.go b/libgo/go/runtime/testdata/testprogcgo/threadpanic.go
new file mode 100644
index 0000000..3c9baba
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/threadpanic.go
@@ -0,0 +1,24 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+package main
+
+// void start(void);
+import "C"
+
+func init() {
+ register("CgoExternalThreadPanic", CgoExternalThreadPanic)
+}
+
+func CgoExternalThreadPanic() {
+ C.start()
+ select {}
+}
+
+//export gopanic
+func gopanic() {
+ panic("BOOM")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/threadprof.go b/libgo/go/runtime/testdata/testprogcgo/threadprof.go
new file mode 100644
index 0000000..03e35d2
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/threadprof.go
@@ -0,0 +1,93 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+package main
+
+/*
+#include <stdint.h>
+#include <signal.h>
+#include <pthread.h>
+
+volatile int32_t spinlock;
+
+static void *thread1(void *p) {
+ (void)p;
+ while (spinlock == 0)
+ ;
+ pthread_kill(pthread_self(), SIGPROF);
+ spinlock = 0;
+ return NULL;
+}
+__attribute__((constructor)) void issue9456() {
+ pthread_t tid;
+ pthread_create(&tid, 0, thread1, NULL);
+}
+
+void **nullptr;
+
+void *crash(void *p) {
+ *nullptr = p;
+ return 0;
+}
+
+int start_crashing_thread(void) {
+ pthread_t tid;
+ return pthread_create(&tid, 0, crash, 0);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "runtime"
+ "sync/atomic"
+ "time"
+ "unsafe"
+)
+
+func init() {
+ register("CgoExternalThreadSIGPROF", CgoExternalThreadSIGPROF)
+ register("CgoExternalThreadSignal", CgoExternalThreadSignal)
+}
+
+func CgoExternalThreadSIGPROF() {
+ // This test checks that sending SIGPROF to foreign threads before
+ // any cgo call is made does not abort the whole process, so it
+ // must not make any cgo calls itself. See https://golang.org/issue/9456.
+ atomic.StoreInt32((*int32)(unsafe.Pointer(&C.spinlock)), 1)
+ for atomic.LoadInt32((*int32)(unsafe.Pointer(&C.spinlock))) == 1 {
+ runtime.Gosched()
+ }
+ println("OK")
+}
+
+func CgoExternalThreadSignal() {
+ if len(os.Args) > 2 && os.Args[2] == "crash" {
+ i := C.start_crashing_thread()
+ if i != 0 {
+ fmt.Println("pthread_create failed:", i)
+ // Exit with 0 because parent expects us to crash.
+ return
+ }
+
+ // We should crash immediately, but give it plenty of
+ // time before failing (by exiting 0) in case we are
+ // running on a slow system.
+ time.Sleep(5 * time.Second)
+ return
+ }
+
+ out, err := exec.Command(os.Args[0], "CgoExternalThreadSignal", "crash").CombinedOutput()
+ if err == nil {
+ fmt.Println("C signal did not crash as expected\n")
+ fmt.Printf("%s\n", out)
+ os.Exit(1)
+ }
+
+ fmt.Println("OK")
+}
diff --git a/libgo/go/runtime/testdata/testprogcgo/windows/win.go b/libgo/go/runtime/testdata/testprogcgo/windows/win.go
new file mode 100644
index 0000000..f2eabb9
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprogcgo/windows/win.go
@@ -0,0 +1,16 @@
+package windows
+
+/*
+#cgo CFLAGS: -mnop-fun-dllimport
+
+#include <windows.h>
+
+DWORD agetthread() {
+ return GetCurrentThreadId();
+}
+*/
+import "C"
+
+func GetThread() uint32 {
+ return uint32(C.agetthread())
+}
diff --git a/libgo/go/runtime/testdata/testprognet/main.go b/libgo/go/runtime/testdata/testprognet/main.go
new file mode 100644
index 0000000..9c227bb
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprognet/main.go
@@ -0,0 +1,35 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "os"
+
+var cmds = map[string]func(){}
+
+func register(name string, f func()) {
+ if cmds[name] != nil {
+ panic("duplicate registration: " + name)
+ }
+ cmds[name] = f
+}
+
+func registerInit(name string, f func()) {
+ if len(os.Args) >= 2 && os.Args[1] == name {
+ f()
+ }
+}
+
+func main() {
+ if len(os.Args) < 2 {
+ println("usage: " + os.Args[0] + " name-of-test")
+ return
+ }
+ f := cmds[os.Args[1]]
+ if f == nil {
+ println("unknown function: " + os.Args[1])
+ return
+ }
+ f()
+}
diff --git a/libgo/go/runtime/testdata/testprognet/net.go b/libgo/go/runtime/testdata/testprognet/net.go
new file mode 100644
index 0000000..c1a7f3f
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprognet/net.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "net"
+)
+
+func init() {
+ registerInit("NetpollDeadlock", NetpollDeadlockInit)
+ register("NetpollDeadlock", NetpollDeadlock)
+}
+
+func NetpollDeadlockInit() {
+ fmt.Println("dialing")
+ c, err := net.Dial("tcp", "localhost:14356")
+ if err == nil {
+ c.Close()
+ } else {
+ fmt.Println("error: ", err)
+ }
+}
+
+func NetpollDeadlock() {
+ fmt.Println("done")
+}
diff --git a/libgo/go/runtime/testdata/testprognet/signal.go b/libgo/go/runtime/testdata/testprognet/signal.go
new file mode 100644
index 0000000..24d1424
--- /dev/null
+++ b/libgo/go/runtime/testdata/testprognet/signal.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows,!plan9,!nacl
+
+// This is in testprognet instead of testprog because testprog
+// must not import anything (like net, but also like os/signal)
+// that kicks off background goroutines during init.
+
+package main
+
+import (
+ "os/signal"
+ "syscall"
+)
+
+func init() {
+ register("SignalIgnoreSIGTRAP", SignalIgnoreSIGTRAP)
+}
+
+func SignalIgnoreSIGTRAP() {
+ signal.Ignore(syscall.SIGTRAP)
+ syscall.Kill(syscall.Getpid(), syscall.SIGTRAP)
+ println("OK")
+}
diff --git a/libgo/go/runtime/write_err.go b/libgo/go/runtime/write_err.go
new file mode 100644
index 0000000..6b1467b
--- /dev/null
+++ b/libgo/go/runtime/write_err.go
@@ -0,0 +1,13 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !android
+
+package runtime
+
+import "unsafe"
+
+func writeErr(b []byte) {
+ write(2, unsafe.Pointer(&b[0]), int32(len(b)))
+}
diff --git a/libgo/go/runtime/write_err_android.go b/libgo/go/runtime/write_err_android.go
new file mode 100644
index 0000000..4411a14
--- /dev/null
+++ b/libgo/go/runtime/write_err_android.go
@@ -0,0 +1,160 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+var (
+ writeHeader = []byte{6 /* ANDROID_LOG_ERROR */, 'G', 'o', 0}
+ writePath = []byte("/dev/log/main\x00")
+ writeLogd = []byte("/dev/socket/logdw\x00")
+
+ // guarded by printlock/printunlock.
+ writeFD uintptr
+ writeBuf [1024]byte
+ writePos int
+)
+
+// Prior to Android-L, logging was done through writes to /dev/log files implemented
+// in kernel ring buffers. In Android-L, those /dev/log files are no longer
+// accessible and logging is done through a centralized user-mode logger, logd.
+//
+// https://android.googlesource.com/platform/system/core/+/master/liblog/logd_write.c
+type loggerType int32
+
+const (
+ unknown loggerType = iota
+ legacy
+ logd
+ // TODO(hakim): logging for emulator?
+)
+
+var logger loggerType
+
+func writeErr(b []byte) {
+ if logger == unknown {
+ // Use logd if /dev/socket/logdw is available.
+ if v := uintptr(access(&writeLogd[0], 0x02 /* W_OK */)); v == 0 {
+ logger = logd
+ initLogd()
+ } else {
+ logger = legacy
+ initLegacy()
+ }
+ }
+
+ // Write to stderr for command-line programs.
+ write(2, unsafe.Pointer(&b[0]), int32(len(b)))
+
+ // Log format: "<header>\x00<message m bytes>\x00"
+ //
+ // <header>
+ // In legacy mode: "<priority 1 byte><tag n bytes>".
+ // In logd mode: "<android_log_header_t 11 bytes><priority 1 byte><tag n bytes>"
+ //
+ // The entire log needs to be delivered in a single syscall (the NDK
+ // does this with writev). Each log is its own line, so we need to
+ // buffer writes until we see a newline.
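+ //
+ // For example, in legacy mode the line "ok\n" is flushed as the
+ // 7 bytes {6, 'G', 'o', 0, 'o', 'k', '\n'}: priority byte, tag
+ // "Go", NUL, then the buffered message.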
+ var hlen int
+ switch logger {
+ case logd:
+ hlen = writeLogdHeader()
+ case legacy:
+ hlen = len(writeHeader)
+ }
+
+ dst := writeBuf[hlen:]
+ for _, v := range b {
+ if v == 0 { // android logging won't print a zero byte
+ v = '0'
+ }
+ dst[writePos] = v
+ writePos++
+ if v == '\n' || writePos == len(dst)-1 {
+ dst[writePos] = 0
+ write(writeFD, unsafe.Pointer(&writeBuf[0]), int32(hlen+writePos))
+ memclrBytes(dst)
+ writePos = 0
+ }
+ }
+}
+
+func initLegacy() {
+ // In legacy mode, logs are written to /dev/log/main
+ writeFD = uintptr(open(&writePath[0], 0x1 /* O_WRONLY */, 0))
+ if writeFD == 0 {
+ // It is hard to do anything here. Write to stderr just
+ // in case user has root on device and has run
+ // adb shell setprop log.redirect-stdio true
+ msg := []byte("runtime: cannot open /dev/log/main\x00")
+ write(2, unsafe.Pointer(&msg[0]), int32(len(msg)))
+ exit(2)
+ }
+
+ // Prepopulate the invariant header part.
+ copy(writeBuf[:len(writeHeader)], writeHeader)
+}
+
+// used in initLogdWrite but defined here to avoid heap allocation.
+var logdAddr sockaddr_un
+
+func initLogd() {
+ // In logd mode, logs are sent to the logd via a unix domain socket.
+ logdAddr.family = _AF_UNIX
+ copy(logdAddr.path[:], writeLogd)
+
+ // We use blocking I/O: writes on this path are most likely triggered
+ // by a panic, where non-blocking I/O has no clear advantage and
+ // risks dropping the message, and blocking keeps the code simple.
+ fd := socket(_AF_UNIX, _SOCK_DGRAM|_O_CLOEXEC, 0)
+ if fd < 0 {
+ msg := []byte("runtime: cannot create a socket for logging\x00")
+ write(2, unsafe.Pointer(&msg[0]), int32(len(msg)))
+ exit(2)
+ }
+
+ errno := connect(fd, unsafe.Pointer(&logdAddr), int32(unsafe.Sizeof(logdAddr)))
+ if errno < 0 {
+ msg := []byte("runtime: cannot connect to /dev/socket/logdw\x00")
+ write(2, unsafe.Pointer(&msg[0]), int32(len(msg)))
+ // TODO(hakim): or should we just close fd and hope for better luck next time?
+ exit(2)
+ }
+ writeFD = uintptr(fd)
+
+ // Prepopulate invariant part of the header.
+ // The first 11 bytes will be populated later in writeLogdHeader.
+ copy(writeBuf[11:11+len(writeHeader)], writeHeader)
+}
+
+// writeLogdHeader populates the header and returns its length, which is
+// the offset at which the payload begins.
+func writeLogdHeader() int {
+ hdr := writeBuf[:11]
+
+ // The first 11 bytes of the header correspond to android_log_header_t
+ // as defined in system/core/include/private/android_logger.h
+ // hdr[0] log type id (unsigned char), defined in <log/log.h>
+ // hdr[1:2] tid (uint16_t)
+ // hdr[3:11] log_time defined in <log/log_read.h>
+ // hdr[3:7] sec (uint32, little-endian)
+ // hdr[7:11] nsec (uint32, little-endian)
+ hdr[0] = 0 // LOG_ID_MAIN
+ sec, nsec := time_now()
+ packUint32(hdr[3:7], uint32(sec))
+ packUint32(hdr[7:11], uint32(nsec))
+
+ // TODO(hakim): hdr[1:2] = gettid?
+
+ return 11 + len(writeHeader)
+}
+
+func packUint32(b []byte, v uint32) {
+ // little-endian.
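+ // e.g. packUint32(b, 0x11223344) stores {0x44, 0x33, 0x22, 0x11}.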
+ b[0] = byte(v)
+ b[1] = byte(v >> 8)
+ b[2] = byte(v >> 16)
+ b[3] = byte(v >> 24)
+}