author     Ian Lance Taylor <ian@gcc.gnu.org>  2013-11-06 19:49:01 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>  2013-11-06 19:49:01 +0000
commit     f038dae646bac2b31be98ab592c0e5206d2d96f5 (patch)
tree       39530b071991b2326f881b2a30a2d82d6c133fd6 /libgo/go/runtime
parent     f20f261304993444741e0f0a14d3147e591bc660 (diff)
download   gcc-f038dae646bac2b31be98ab592c0e5206d2d96f5.zip
           gcc-f038dae646bac2b31be98ab592c0e5206d2d96f5.tar.gz
           gcc-f038dae646bac2b31be98ab592c0e5206d2d96f5.tar.bz2
libgo: Update to October 24 version of master library.
From-SVN: r204466
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/append_test.go        48
-rw-r--r--  libgo/go/runtime/crash_cgo_test.go     34
-rw-r--r--  libgo/go/runtime/crash_test.go         90
-rw-r--r--  libgo/go/runtime/debug/garbage.go      34
-rw-r--r--  libgo/go/runtime/debug/stack_test.go    8
-rw-r--r--  libgo/go/runtime/error.go              16
-rw-r--r--  libgo/go/runtime/export_test.go        17
-rw-r--r--  libgo/go/runtime/extern.go             39
-rw-r--r--  libgo/go/runtime/gc_test.go             4
-rw-r--r--  libgo/go/runtime/malloc_test.go       156
-rw-r--r--  libgo/go/runtime/map_test.go           38
-rw-r--r--  libgo/go/runtime/mapspeed_test.go      62
-rw-r--r--  libgo/go/runtime/mem.go                 4
-rw-r--r--  libgo/go/runtime/memmove_test.go      116
-rw-r--r--  libgo/go/runtime/mfinal_test.go        86
-rw-r--r--  libgo/go/runtime/norace_test.go        58
-rw-r--r--  libgo/go/runtime/parfor_test.go         5
-rw-r--r--  libgo/go/runtime/pprof/pprof.go         6
-rw-r--r--  libgo/go/runtime/pprof/pprof_test.go  363
-rw-r--r--  libgo/go/runtime/proc_test.go         250
-rw-r--r--  libgo/go/runtime/runtime_test.go       46
21 files changed, 1349 insertions, 131 deletions
diff --git a/libgo/go/runtime/append_test.go b/libgo/go/runtime/append_test.go
index 3639018..937c825 100644
--- a/libgo/go/runtime/append_test.go
+++ b/libgo/go/runtime/append_test.go
@@ -38,10 +38,18 @@ func BenchmarkAppend4Bytes(b *testing.B) {
benchmarkAppendBytes(b, 4)
}
+func BenchmarkAppend7Bytes(b *testing.B) {
+ benchmarkAppendBytes(b, 7)
+}
+
func BenchmarkAppend8Bytes(b *testing.B) {
benchmarkAppendBytes(b, 8)
}
+func BenchmarkAppend15Bytes(b *testing.B) {
+ benchmarkAppendBytes(b, 15)
+}
+
func BenchmarkAppend16Bytes(b *testing.B) {
benchmarkAppendBytes(b, 16)
}
@@ -121,3 +129,43 @@ func TestAppendOverlap(t *testing.T) {
t.Errorf("overlap failed: got %q want %q", got, want)
}
}
+
+func benchmarkCopySlice(b *testing.B, l int) {
+ s := make([]byte, l)
+ buf := make([]byte, 4096)
+ var n int
+ for i := 0; i < b.N; i++ {
+ n = copy(buf, s)
+ }
+ b.SetBytes(int64(n))
+}
+
+func benchmarkCopyStr(b *testing.B, l int) {
+ s := string(make([]byte, l))
+ buf := make([]byte, 4096)
+ var n int
+ for i := 0; i < b.N; i++ {
+ n = copy(buf, s)
+ }
+ b.SetBytes(int64(n))
+}
+
+func BenchmarkCopy1Byte(b *testing.B) { benchmarkCopySlice(b, 1) }
+func BenchmarkCopy2Byte(b *testing.B) { benchmarkCopySlice(b, 2) }
+func BenchmarkCopy4Byte(b *testing.B) { benchmarkCopySlice(b, 4) }
+func BenchmarkCopy8Byte(b *testing.B) { benchmarkCopySlice(b, 8) }
+func BenchmarkCopy12Byte(b *testing.B) { benchmarkCopySlice(b, 12) }
+func BenchmarkCopy16Byte(b *testing.B) { benchmarkCopySlice(b, 16) }
+func BenchmarkCopy32Byte(b *testing.B) { benchmarkCopySlice(b, 32) }
+func BenchmarkCopy128Byte(b *testing.B) { benchmarkCopySlice(b, 128) }
+func BenchmarkCopy1024Byte(b *testing.B) { benchmarkCopySlice(b, 1024) }
+
+func BenchmarkCopy1String(b *testing.B) { benchmarkCopyStr(b, 1) }
+func BenchmarkCopy2String(b *testing.B) { benchmarkCopyStr(b, 2) }
+func BenchmarkCopy4String(b *testing.B) { benchmarkCopyStr(b, 4) }
+func BenchmarkCopy8String(b *testing.B) { benchmarkCopyStr(b, 8) }
+func BenchmarkCopy12String(b *testing.B) { benchmarkCopyStr(b, 12) }
+func BenchmarkCopy16String(b *testing.B) { benchmarkCopyStr(b, 16) }
+func BenchmarkCopy32String(b *testing.B) { benchmarkCopyStr(b, 32) }
+func BenchmarkCopy128String(b *testing.B) { benchmarkCopyStr(b, 128) }
+func BenchmarkCopy1024String(b *testing.B) { benchmarkCopyStr(b, 1024) }
diff --git a/libgo/go/runtime/crash_cgo_test.go b/libgo/go/runtime/crash_cgo_test.go
index 6b93bd1..b534b89 100644
--- a/libgo/go/runtime/crash_cgo_test.go
+++ b/libgo/go/runtime/crash_cgo_test.go
@@ -7,6 +7,7 @@
package runtime_test
import (
+ "runtime"
"testing"
)
@@ -15,13 +16,23 @@ func TestCgoCrashHandler(t *testing.T) {
}
func TestCgoSignalDeadlock(t *testing.T) {
- /* gccgo does not have a go command
+ if testing.Short() && runtime.GOOS == "windows" {
+ t.Skip("Skipping in short mode") // takes up to 64 seconds
+ }
+ t.Skip("gccgo does not have a go command")
got := executeTest(t, cgoSignalDeadlockSource, nil)
want := "OK\n"
if got != want {
t.Fatalf("expected %q, but got %q", want, got)
}
- */
+}
+
+func TestCgoTraceback(t *testing.T) {
+ got := executeTest(t, cgoTracebackSource, nil)
+ want := "OK\n"
+ if got != want {
+ t.Fatalf("expected %q, but got %q", want, got)
+ }
}
const cgoSignalDeadlockSource = `
@@ -88,3 +99,22 @@ func main() {
fmt.Printf("OK\n")
}
`
+
+const cgoTracebackSource = `
+package main
+
+/* void foo(void) {} */
+import "C"
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func main() {
+ C.foo()
+ buf := make([]byte, 1)
+ runtime.Stack(buf, true)
+ fmt.Printf("OK\n")
+}
+`
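For reference (a sketch, not part of this commit): typical use of runtime.Stack sizes the buffer generously and slices to the returned length; the 1-byte buffer in cgoTracebackSource only exercises the traceback path after a cgo frame, it does not capture the output.

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	buf := make([]byte, 4096)
    	// true asks for every goroutine, not just the caller;
    	// Stack returns the number of bytes written into buf.
    	n := runtime.Stack(buf, true)
    	fmt.Printf("%s", buf[:n])
    }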
diff --git a/libgo/go/runtime/crash_test.go b/libgo/go/runtime/crash_test.go
index 918249a..d8bfdbd 100644
--- a/libgo/go/runtime/crash_test.go
+++ b/libgo/go/runtime/crash_test.go
@@ -14,7 +14,7 @@ import (
"text/template"
)
-// testEnv excludes GOGCTRACE from the environment
+// testEnv excludes GODEBUG from the environment
// to prevent its output from breaking tests that
// are trying to parse other command output.
func testEnv(cmd *exec.Cmd) *exec.Cmd {
@@ -22,7 +22,7 @@ func testEnv(cmd *exec.Cmd) *exec.Cmd {
panic("environment already set")
}
for _, env := range os.Environ() {
- if strings.HasPrefix(env, "GOGCTRACE=") {
+ if strings.HasPrefix(env, "GODEBUG=") {
continue
}
cmd.Env = append(cmd.Env, env)
@@ -31,6 +31,7 @@ func testEnv(cmd *exec.Cmd) *exec.Cmd {
}
func executeTest(t *testing.T, templ string, data interface{}) string {
+ t.Skip("gccgo does not have a go command")
checkStaleRuntime(t)
st := template.Must(template.New("crashSource").Parse(templ))
@@ -44,14 +45,16 @@ func executeTest(t *testing.T, templ string, data interface{}) string {
src := filepath.Join(dir, "main.go")
f, err := os.Create(src)
if err != nil {
- t.Fatalf("failed to create %v: %v", src, err)
+ t.Fatalf("failed to create file: %v", err)
}
err = st.Execute(f, data)
if err != nil {
f.Close()
t.Fatalf("failed to execute template: %v", err)
}
- f.Close()
+ if err := f.Close(); err != nil {
+ t.Fatalf("failed to close file: %v", err)
+ }
got, _ := testEnv(exec.Command("go", "run", src)).CombinedOutput()
return string(got)
@@ -69,16 +72,14 @@ func checkStaleRuntime(t *testing.T) {
}
func testCrashHandler(t *testing.T, cgo bool) {
- /* gccgo does not have a go command
type crashTest struct {
Cgo bool
}
- got := executeTest(t, crashSource, &crashTest{Cgo: cgo})
+ output := executeTest(t, crashSource, &crashTest{Cgo: cgo})
want := "main: recovered done\nnew-thread: recovered done\nsecond-new-thread: recovered done\nmain-again: recovered done\n"
- if got != want {
- t.Fatalf("expected %q, but got %q", want, got)
+ if output != want {
+ t.Fatalf("output:\n%s\n\nwanted:\n%s", output, want)
}
- */
}
func TestCrashHandler(t *testing.T) {
@@ -86,13 +87,11 @@ func TestCrashHandler(t *testing.T) {
}
func testDeadlock(t *testing.T, source string) {
- /* gccgo does not have a go command.
- got := executeTest(t, source, nil)
+ output := executeTest(t, source, nil)
want := "fatal error: all goroutines are asleep - deadlock!\n"
- if !strings.HasPrefix(got, want) {
- t.Fatalf("expected %q, but got %q", want, got)
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
}
- */
}
func TestSimpleDeadlock(t *testing.T) {
@@ -112,13 +111,26 @@ func TestLockedDeadlock2(t *testing.T) {
}
func TestGoexitDeadlock(t *testing.T) {
- /* gccgo does not have a go command
- got := executeTest(t, goexitDeadlockSource, nil)
- want := ""
- if got != want {
- t.Fatalf("expected %q, but got %q", want, got)
+ output := executeTest(t, goexitDeadlockSource, nil)
+ if output != "" {
+ t.Fatalf("expected no output, got:\n%s", output)
+ }
+}
+
+func TestStackOverflow(t *testing.T) {
+ output := executeTest(t, stackOverflowSource, nil)
+ want := "runtime: goroutine stack exceeds 4194304-byte limit\nfatal error: stack overflow"
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
+ }
+}
+
+func TestThreadExhaustion(t *testing.T) {
+ output := executeTest(t, threadExhaustionSource, nil)
+ want := "runtime: program exceeds 10-thread limit\nfatal error: thread exhaustion"
+ if !strings.HasPrefix(output, want) {
+ t.Fatalf("output does not start with %q:\n%s", want, output)
}
- */
}
const crashSource = `
@@ -223,3 +235,41 @@ func main() {
runtime.Goexit()
}
`
+
+const stackOverflowSource = `
+package main
+
+import "runtime/debug"
+
+func main() {
+ debug.SetMaxStack(4<<20)
+ f(make([]byte, 10))
+}
+
+func f(x []byte) byte {
+ var buf [64<<10]byte
+ return x[0] + f(buf[:])
+}
+`
+
+const threadExhaustionSource = `
+package main
+
+import (
+ "runtime"
+ "runtime/debug"
+)
+
+func main() {
+ debug.SetMaxThreads(10)
+ c := make(chan int)
+ for i := 0; i < 100; i++ {
+ go func() {
+ runtime.LockOSThread()
+ c <- 0
+ select{}
+ }()
+ <-c
+ }
+}
+`
diff --git a/libgo/go/runtime/debug/garbage.go b/libgo/go/runtime/debug/garbage.go
index 8f30264..8337d5d 100644
--- a/libgo/go/runtime/debug/garbage.go
+++ b/libgo/go/runtime/debug/garbage.go
@@ -24,6 +24,8 @@ func readGCStats(*[]time.Duration)
func enableGC(bool) bool
func setGCPercent(int) int
func freeOSMemory()
+func setMaxStack(int) int
+func setMaxThreads(int) int
// ReadGCStats reads statistics about garbage collection into stats.
// The number of entries in the pause history is system-dependent;
@@ -99,3 +101,35 @@ func SetGCPercent(percent int) int {
func FreeOSMemory() {
freeOSMemory()
}
+
+// SetMaxStack sets the maximum amount of memory that
+// can be used by a single goroutine stack.
+// If any goroutine exceeds this limit while growing its stack,
+// the program crashes.
+// SetMaxStack returns the previous setting.
+// The initial setting is 1 GB on 64-bit systems, 250 MB on 32-bit systems.
+//
+// SetMaxStack is useful mainly for limiting the damage done by
+// goroutines that enter an infinite recursion. It only limits future
+// stack growth.
+func SetMaxStack(bytes int) int {
+ return setMaxStack(bytes)
+}
+
+// SetMaxThreads sets the maximum number of operating system
+// threads that the Go program can use. If it attempts to use more than
+// this many, the program crashes.
+// SetMaxThreads returns the previous setting.
+// The initial setting is 10,000 threads.
+//
+// The limit controls the number of operating system threads, not the number
+// of goroutines. A Go program creates a new thread only when a goroutine
+// is ready to run but all the existing threads are blocked in system calls, cgo calls,
+// or are locked to other goroutines due to use of runtime.LockOSThread.
+//
+// SetMaxThreads is useful mainly for limiting the damage done by
+// programs that create an unbounded number of threads. The idea is
+// to take down the program before it takes down the operating system.
+func SetMaxThreads(threads int) int {
+ return setMaxThreads(threads)
+}
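A minimal usage sketch for the two new knobs (illustrative, not part of this commit); both setters return the previous limit:

    package main

    import (
    	"fmt"
    	"runtime/debug"
    )

    func main() {
    	// Crash any goroutine whose stack grows past 32 MB.
    	prevStack := debug.SetMaxStack(32 << 20)
    	// Crash the program if it ever needs more than 1000 OS threads.
    	prevThreads := debug.SetMaxThreads(1000)
    	fmt.Println("previous limits:", prevStack, prevThreads)
    }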
diff --git a/libgo/go/runtime/debug/stack_test.go b/libgo/go/runtime/debug/stack_test.go
index bbd6626..263d715 100644
--- a/libgo/go/runtime/debug/stack_test.go
+++ b/libgo/go/runtime/debug/stack_test.go
@@ -49,10 +49,10 @@ func TestStack(t *testing.T) {
n++
}
}
- frame("src/pkg/runtime/debug/stack_test.go", "\t(*T).ptrmethod: return Stack()")
- frame("src/pkg/runtime/debug/stack_test.go", "\tT.method: return t.ptrmethod()")
- frame("src/pkg/runtime/debug/stack_test.go", "\tTestStack: b := T(0).method()")
- frame("src/pkg/testing/testing.go", "")
+ frame("stack_test.go", "\tmethod.N15_runtime_debug.T: return Stack()")
+ frame("stack_test.go", "\tmethod.N15_runtime_debug.T: return t.ptrmethod()")
+ frame("stack_test.go", "\tTestStack: b := T(0).method()")
+ frame("testing/testing.go", "")
}
func check(t *testing.T, line, has string) {
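For comparison (illustrative only), the function under test simply returns a formatted traceback of the calling goroutine; the frame strings matched above are gccgo's symbolization of those frames:

    package main

    import (
    	"fmt"
    	"runtime/debug"
    )

    func main() {
    	// Stack returns a []byte holding a human-readable trace
    	// of the goroutine that called it.
    	fmt.Printf("%s", debug.Stack())
    }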
diff --git a/libgo/go/runtime/error.go b/libgo/go/runtime/error.go
index f7f81e9..88d5df5 100644
--- a/libgo/go/runtime/error.go
+++ b/libgo/go/runtime/error.go
@@ -106,6 +106,22 @@ func NewErrorString(s string, ret *interface{}) {
*ret = errorString(s)
}
+// An errorCString represents a runtime error described by a single C string.
+type errorCString uintptr
+
+func (e errorCString) RuntimeError() {}
+
+func cstringToGo(uintptr) string
+
+func (e errorCString) Error() string {
+ return "runtime error: " + cstringToGo(uintptr(e))
+}
+
+// For calling from C.
+func NewErrorCString(s uintptr, ret *interface{}) {
+ *ret = errorCString(s)
+}
+
type stringer interface {
String() string
}
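For context (a sketch, not part of the patch): errorCString, like errorString, satisfies the runtime.Error interface, which is what user code sees when it recovers a runtime fault:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	defer func() {
    		if r := recover(); r != nil {
    			// Runtime faults carry the runtime.Error marker interface.
    			if re, ok := r.(runtime.Error); ok {
    				fmt.Println("recovered runtime error:", re.Error())
    			}
    		}
    	}()
    	var s []int
    	_ = s[1] // index out of range: panics with a runtime.Error
    }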
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index e22fa62..2f678b6 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -65,3 +65,20 @@ func testSchedLocalQueueSteal()
var TestSchedLocalQueue1 = testSchedLocalQueue
var TestSchedLocalQueueSteal1 = testSchedLocalQueueSteal
+
+// func haveGoodHash() bool
+// func stringHash(s string, seed uintptr) uintptr
+// func bytesHash(b []byte, seed uintptr) uintptr
+// func int32Hash(i uint32, seed uintptr) uintptr
+// func int64Hash(i uint64, seed uintptr) uintptr
+
+// var HaveGoodHash = haveGoodHash
+// var StringHash = stringHash
+// var BytesHash = bytesHash
+// var Int32Hash = int32Hash
+// var Int64Hash = int64Hash
+
+// func GogoBytes() int32
+
+var hashLoad float64 // declared in hashmap.c
+var HashLoad = &hashLoad
diff --git a/libgo/go/runtime/extern.go b/libgo/go/runtime/extern.go
index 72e1a0a..527e9cd 100644
--- a/libgo/go/runtime/extern.go
+++ b/libgo/go/runtime/extern.go
@@ -21,11 +21,20 @@ is GOGC=100. Setting GOGC=off disables the garbage collector entirely.
The runtime/debug package's SetGCPercent function allows changing this
percentage at run time. See http://golang.org/pkg/runtime/debug/#SetGCPercent.
-The GOGCTRACE variable controls debug output from the garbage collector.
-Setting GOGCTRACE=1 causes the garbage collector to emit a single line to standard
-error at each collection, summarizing the amount of memory collected and the
-length of the pause. Setting GOGCTRACE=2 emits the same summary but also
-repeats each collection.
+The GODEBUG variable controls debug output from the runtime. GODEBUG value is
+a comma-separated list of name=val pairs. Supported names are:
+
+ gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard
+ error at each collection, summarizing the amount of memory collected and the
+ length of the pause. Setting gctrace=2 emits the same summary but also
+ repeats each collection.
+
+ schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard
+ error every X milliseconds, summarizing the scheduler state.
+
+ scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
+ detailed multiline info every X milliseconds, describing state of the scheduler,
+ processors, threads and goroutines.
The GOMAXPROCS variable limits the number of operating system threads that
can execute user-level Go code simultaneously. There is no limit to the number of threads
@@ -77,9 +86,8 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool)
// It returns the number of entries written to pc.
func Callers(skip int, pc []uintptr) int
-type Func struct { // Keep in sync with runtime.h:struct Func
- name string
- entry uintptr // entry pc
+type Func struct {
+ opaque struct{} // unexported field to disallow conversions
}
// FuncForPC returns a *Func describing the function that contains the
@@ -87,10 +95,14 @@ type Func struct { // Keep in sync with runtime.h:struct Func
func FuncForPC(pc uintptr) *Func
// Name returns the name of the function.
-func (f *Func) Name() string { return f.name }
+func (f *Func) Name() string {
+ return funcname_go(f)
+}
// Entry returns the entry address of the function.
-func (f *Func) Entry() uintptr { return f.entry }
+func (f *Func) Entry() uintptr {
+ return funcentry_go(f)
+}
// FileLine returns the file name and line number of the
// source code corresponding to the program counter pc.
@@ -102,6 +114,8 @@ func (f *Func) FileLine(pc uintptr) (file string, line int) {
// implemented in symtab.c
func funcline_go(*Func, uintptr) (string, int)
+func funcname_go(*Func) string
+func funcentry_go(*Func) uintptr
// SetFinalizer sets the finalizer associated with x to f.
// When the garbage collector finds an unreachable block
@@ -116,8 +130,9 @@ func funcline_go(*Func, uintptr) (string, int)
// The argument x must be a pointer to an object allocated by
// calling new or by taking the address of a composite literal.
// The argument f must be a function that takes a single argument
-// of x's type and can have arbitrary ignored return values.
-// If either of these is not true, SetFinalizer aborts the program.
+// to which x's type can be assigned, and can have arbitrary ignored return
+// values. If either of these is not true, SetFinalizer aborts the
+// program.
//
// Finalizers are run in dependency order: if A points at B, both have
// finalizers, and they are otherwise unreachable, only the finalizer
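The reworded SetFinalizer contract above lets the finalizer's parameter be any type to which x's type is assignable, not only x's exact type. A sketch (not from this commit, mirroring the Tintptr case added to mfinal_test.go):

    package main

    import (
    	"fmt"
    	"runtime"
    	"time"
    )

    type Tintptr *int // *int is assignable to Tintptr

    func main() {
    	x := new(int)
    	// The parameter is Tintptr, yet x is a plain *int: permitted,
    	// because *int is assignable to Tintptr.
    	runtime.SetFinalizer(x, func(p Tintptr) { fmt.Println("finalized") })
    	x = nil
    	runtime.GC()
    	time.Sleep(100 * time.Millisecond) // give the finalizer goroutine a chance
    }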
diff --git a/libgo/go/runtime/gc_test.go b/libgo/go/runtime/gc_test.go
index 05ee348..1b3ccbf 100644
--- a/libgo/go/runtime/gc_test.go
+++ b/libgo/go/runtime/gc_test.go
@@ -138,7 +138,9 @@ func TestGcRescan(t *testing.T) {
for i := 0; i < 10; i++ {
p := &Y{}
p.c = make(chan error)
- p.nextx = &head.X
+ if head != nil {
+ p.nextx = &head.X
+ }
p.nexty = head
p.p = new(int)
*p.p = 42
diff --git a/libgo/go/runtime/malloc_test.go b/libgo/go/runtime/malloc_test.go
new file mode 100644
index 0000000..054f6a7
--- /dev/null
+++ b/libgo/go/runtime/malloc_test.go
@@ -0,0 +1,156 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "flag"
+ . "runtime"
+ "testing"
+ "time"
+ "unsafe"
+)
+
+func TestMemStats(t *testing.T) {
+ // Test that MemStats has sane values.
+ st := new(MemStats)
+ ReadMemStats(st)
+ if st.HeapSys == 0 || /* st.StackSys == 0 || */ st.MSpanSys == 0 || st.MCacheSys == 0 ||
+ st.BuckHashSys == 0 || st.GCSys == 0 || st.OtherSys == 0 {
+ t.Fatalf("Zero sys value: %+v", *st)
+ }
+ if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
+ st.BuckHashSys+st.GCSys+st.OtherSys {
+ t.Fatalf("Bad sys value: %+v", *st)
+ }
+}
+
+var mallocSink uintptr
+
+func BenchmarkMalloc8(b *testing.B) {
+ var x uintptr
+ for i := 0; i < b.N; i++ {
+ p := new(int64)
+ x ^= uintptr(unsafe.Pointer(p))
+ }
+ mallocSink = x
+}
+
+func BenchmarkMalloc16(b *testing.B) {
+ var x uintptr
+ for i := 0; i < b.N; i++ {
+ p := new([2]int64)
+ x ^= uintptr(unsafe.Pointer(p))
+ }
+ mallocSink = x
+}
+
+func BenchmarkMallocTypeInfo8(b *testing.B) {
+ var x uintptr
+ for i := 0; i < b.N; i++ {
+ p := new(struct {
+ p [8 / unsafe.Sizeof(uintptr(0))]*int
+ })
+ x ^= uintptr(unsafe.Pointer(p))
+ }
+ mallocSink = x
+}
+
+func BenchmarkMallocTypeInfo16(b *testing.B) {
+ var x uintptr
+ for i := 0; i < b.N; i++ {
+ p := new(struct {
+ p [16 / unsafe.Sizeof(uintptr(0))]*int
+ })
+ x ^= uintptr(unsafe.Pointer(p))
+ }
+ mallocSink = x
+}
+
+var n = flag.Int("n", 1000, "number of goroutines")
+
+func BenchmarkGoroutineSelect(b *testing.B) {
+ quit := make(chan struct{})
+ read := func(ch chan struct{}) {
+ for {
+ select {
+ case _, ok := <-ch:
+ if !ok {
+ return
+ }
+ case <-quit:
+ return
+ }
+ }
+ }
+ benchHelper(b, *n, read)
+}
+
+func BenchmarkGoroutineBlocking(b *testing.B) {
+ read := func(ch chan struct{}) {
+ for {
+ if _, ok := <-ch; !ok {
+ return
+ }
+ }
+ }
+ benchHelper(b, *n, read)
+}
+
+func BenchmarkGoroutineForRange(b *testing.B) {
+ read := func(ch chan struct{}) {
+ for _ = range ch {
+ }
+ }
+ benchHelper(b, *n, read)
+}
+
+func benchHelper(b *testing.B, n int, read func(chan struct{})) {
+ m := make([]chan struct{}, n)
+ for i := range m {
+ m[i] = make(chan struct{}, 1)
+ go read(m[i])
+ }
+ b.StopTimer()
+ b.ResetTimer()
+ GC()
+
+ for i := 0; i < b.N; i++ {
+ for _, ch := range m {
+ if ch != nil {
+ ch <- struct{}{}
+ }
+ }
+ time.Sleep(10 * time.Millisecond)
+ b.StartTimer()
+ GC()
+ b.StopTimer()
+ }
+
+ for _, ch := range m {
+ close(ch)
+ }
+ time.Sleep(10 * time.Millisecond)
+}
+
+func BenchmarkGoroutineIdle(b *testing.B) {
+ quit := make(chan struct{})
+ fn := func() {
+ <-quit
+ }
+ for i := 0; i < *n; i++ {
+ go fn()
+ }
+
+ GC()
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ GC()
+ }
+
+ b.StopTimer()
+ close(quit)
+ time.Sleep(10 * time.Millisecond)
+}
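One detail of the TypeInfo benchmarks worth spelling out (illustrative): the array length 8 / unsafe.Sizeof(uintptr(0)) is a compile-time constant, giving one pointer slot on 64-bit targets and two on 32-bit, so the allocated object is always exactly 8 (or 16) bytes of pointers:

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	// 8/8 = 1 pointer on 64-bit, 8/4 = 2 pointers on 32-bit.
    	const n = 8 / unsafe.Sizeof(uintptr(0))
    	fmt.Println(n)
    }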
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index 535c59e..c53066a 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -378,3 +378,41 @@ func testMapLookups(t *testing.T, m map[string]string) {
}
}
}
+
+// Tests whether the iterator returns the right elements when
+// started in the middle of a grow, when the keys are NaNs.
+func TestMapNanGrowIterator(t *testing.T) {
+ m := make(map[float64]int)
+ nan := math.NaN()
+ const nBuckets = 16
+ // To fill nBuckets buckets takes LOAD * nBuckets keys.
+ nKeys := int(nBuckets * *runtime.HashLoad)
+
+ // Get map to full point with nan keys.
+ for i := 0; i < nKeys; i++ {
+ m[nan] = i
+ }
+ // Trigger grow
+ m[1.0] = 1
+ delete(m, 1.0)
+
+ // Run iterator
+ found := make(map[int]struct{})
+ for _, v := range m {
+ if v != -1 {
+ if _, repeat := found[v]; repeat {
+ t.Fatalf("repeat of value %d", v)
+ }
+ found[v] = struct{}{}
+ }
+ if len(found) == nKeys/2 {
+ // Halfway through iteration, finish grow.
+ for i := 0; i < nBuckets; i++ {
+ delete(m, 1.0)
+ }
+ }
+ }
+ if len(found) != nKeys {
+ t.Fatalf("missing value")
+ }
+}
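Background for the NaN iterator test (a sketch, not part of the patch): NaN compares unequal to itself, so every store under a NaN key inserts a fresh entry that lookups can never retrieve; only iteration observes them, which is why the test fills the map this way:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	m := make(map[float64]int)
    	nan := math.NaN()
    	m[nan] = 1
    	m[nan] = 2 // NaN != NaN: a second, distinct entry
    	_, ok := m[nan]
    	fmt.Println(len(m), ok) // 2 false
    }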
diff --git a/libgo/go/runtime/mapspeed_test.go b/libgo/go/runtime/mapspeed_test.go
index 3b7fbfd..d643d98 100644
--- a/libgo/go/runtime/mapspeed_test.go
+++ b/libgo/go/runtime/mapspeed_test.go
@@ -32,6 +32,33 @@ func BenchmarkHashStringSpeed(b *testing.B) {
}
}
+type chunk [17]byte
+
+func BenchmarkHashBytesSpeed(b *testing.B) {
+ // a bunch of chunks, each with a different alignment mod 16
+ var chunks [size]chunk
+ // initialize each to a different value
+ for i := 0; i < size; i++ {
+ chunks[i][0] = byte(i)
+ }
+ // put into a map
+ m := make(map[chunk]int, size)
+ for i, c := range chunks {
+ m[c] = i
+ }
+ idx := 0
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ if m[chunks[idx]] != idx {
+ b.Error("bad map entry for chunk")
+ }
+ idx++
+ if idx == size {
+ idx = 0
+ }
+ }
+}
+
func BenchmarkHashInt32Speed(b *testing.B) {
ints := make([]int32, size)
for i := 0; i < size; i++ {
@@ -206,3 +233,38 @@ func BenchmarkNewEmptyMap(b *testing.B) {
_ = make(map[int]int)
}
}
+
+func BenchmarkMapIter(b *testing.B) {
+ m := make(map[int]bool)
+ for i := 0; i < 8; i++ {
+ m[i] = true
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, _ = range m {
+ }
+ }
+}
+
+func BenchmarkMapIterEmpty(b *testing.B) {
+ m := make(map[int]bool)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ for _, _ = range m {
+ }
+ }
+}
+
+func BenchmarkSameLengthMap(b *testing.B) {
+ // long strings, same length, differ in first few
+ // and last few bytes.
+ m := make(map[string]bool)
+ s1 := "foo" + strings.Repeat("-", 100) + "bar"
+ s2 := "goo" + strings.Repeat("-", 100) + "ber"
+ m[s1] = true
+ m[s2] = true
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _ = m[s1]
+ }
+}
diff --git a/libgo/go/runtime/mem.go b/libgo/go/runtime/mem.go
index fd423c8..ba6d1cf 100644
--- a/libgo/go/runtime/mem.go
+++ b/libgo/go/runtime/mem.go
@@ -14,7 +14,7 @@ type MemStats struct {
// General statistics.
Alloc uint64 // bytes allocated and still in use
TotalAlloc uint64 // bytes allocated (even if freed)
- Sys uint64 // bytes obtained from system (should be sum of XxxSys below)
+ Sys uint64 // bytes obtained from system (sum of XxxSys below)
Lookups uint64 // number of pointer lookups
Mallocs uint64 // number of mallocs
Frees uint64 // number of frees
@@ -37,6 +37,8 @@ type MemStats struct {
MCacheInuse uint64 // mcache structures
MCacheSys uint64
BuckHashSys uint64 // profiling bucket hash table
+ GCSys uint64 // GC metadata
+ OtherSys uint64 // other system allocations
// Garbage collector statistics.
NextGC uint64 // next run in HeapAlloc time (bytes)
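With the two new fields, Sys is the exact sum of the XxxSys components rather than an approximation; a quick check (illustrative, matching what TestMemStats asserts):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var st runtime.MemStats
    	runtime.ReadMemStats(&st)
    	sum := st.HeapSys + st.StackSys + st.MSpanSys + st.MCacheSys +
    		st.BuckHashSys + st.GCSys + st.OtherSys
    	fmt.Println("Sys == sum of parts:", st.Sys == sum)
    }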
diff --git a/libgo/go/runtime/memmove_test.go b/libgo/go/runtime/memmove_test.go
new file mode 100644
index 0000000..9525f06
--- /dev/null
+++ b/libgo/go/runtime/memmove_test.go
@@ -0,0 +1,116 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "testing"
+)
+
+func TestMemmove(t *testing.T) {
+ size := 256
+ if testing.Short() {
+ size = 128 + 16
+ }
+ src := make([]byte, size)
+ dst := make([]byte, size)
+ for i := 0; i < size; i++ {
+ src[i] = byte(128 + (i & 127))
+ }
+ for i := 0; i < size; i++ {
+ dst[i] = byte(i & 127)
+ }
+ for n := 0; n <= size; n++ {
+ for x := 0; x <= size-n; x++ { // offset in src
+ for y := 0; y <= size-n; y++ { // offset in dst
+ copy(dst[y:y+n], src[x:x+n])
+ for i := 0; i < y; i++ {
+ if dst[i] != byte(i&127) {
+ t.Fatalf("prefix dst[%d] = %d", i, dst[i])
+ }
+ }
+ for i := y; i < y+n; i++ {
+ if dst[i] != byte(128+((i-y+x)&127)) {
+ t.Fatalf("copied dst[%d] = %d", i, dst[i])
+ }
+ dst[i] = byte(i & 127) // reset dst
+ }
+ for i := y + n; i < size; i++ {
+ if dst[i] != byte(i&127) {
+ t.Fatalf("suffix dst[%d] = %d", i, dst[i])
+ }
+ }
+ }
+ }
+ }
+}
+
+func TestMemmoveAlias(t *testing.T) {
+ size := 256
+ if testing.Short() {
+ size = 128 + 16
+ }
+ buf := make([]byte, size)
+ for i := 0; i < size; i++ {
+ buf[i] = byte(i)
+ }
+ for n := 0; n <= size; n++ {
+ for x := 0; x <= size-n; x++ { // src offset
+ for y := 0; y <= size-n; y++ { // dst offset
+ copy(buf[y:y+n], buf[x:x+n])
+ for i := 0; i < y; i++ {
+ if buf[i] != byte(i) {
+ t.Fatalf("prefix buf[%d] = %d", i, buf[i])
+ }
+ }
+ for i := y; i < y+n; i++ {
+ if buf[i] != byte(i-y+x) {
+ t.Fatalf("copied buf[%d] = %d", i, buf[i])
+ }
+ buf[i] = byte(i) // reset buf
+ }
+ for i := y + n; i < size; i++ {
+ if buf[i] != byte(i) {
+ t.Fatalf("suffix buf[%d] = %d", i, buf[i])
+ }
+ }
+ }
+ }
+ }
+}
+
+func bmMemmove(n int, b *testing.B) {
+ x := make([]byte, n)
+ y := make([]byte, n)
+ b.SetBytes(int64(n))
+ for i := 0; i < b.N; i++ {
+ copy(x, y)
+ }
+}
+
+func BenchmarkMemmove0(b *testing.B) { bmMemmove(0, b) }
+func BenchmarkMemmove1(b *testing.B) { bmMemmove(1, b) }
+func BenchmarkMemmove2(b *testing.B) { bmMemmove(2, b) }
+func BenchmarkMemmove3(b *testing.B) { bmMemmove(3, b) }
+func BenchmarkMemmove4(b *testing.B) { bmMemmove(4, b) }
+func BenchmarkMemmove5(b *testing.B) { bmMemmove(5, b) }
+func BenchmarkMemmove6(b *testing.B) { bmMemmove(6, b) }
+func BenchmarkMemmove7(b *testing.B) { bmMemmove(7, b) }
+func BenchmarkMemmove8(b *testing.B) { bmMemmove(8, b) }
+func BenchmarkMemmove9(b *testing.B) { bmMemmove(9, b) }
+func BenchmarkMemmove10(b *testing.B) { bmMemmove(10, b) }
+func BenchmarkMemmove11(b *testing.B) { bmMemmove(11, b) }
+func BenchmarkMemmove12(b *testing.B) { bmMemmove(12, b) }
+func BenchmarkMemmove13(b *testing.B) { bmMemmove(13, b) }
+func BenchmarkMemmove14(b *testing.B) { bmMemmove(14, b) }
+func BenchmarkMemmove15(b *testing.B) { bmMemmove(15, b) }
+func BenchmarkMemmove16(b *testing.B) { bmMemmove(16, b) }
+func BenchmarkMemmove32(b *testing.B) { bmMemmove(32, b) }
+func BenchmarkMemmove64(b *testing.B) { bmMemmove(64, b) }
+func BenchmarkMemmove128(b *testing.B) { bmMemmove(128, b) }
+func BenchmarkMemmove256(b *testing.B) { bmMemmove(256, b) }
+func BenchmarkMemmove512(b *testing.B) { bmMemmove(512, b) }
+func BenchmarkMemmove1024(b *testing.B) { bmMemmove(1024, b) }
+func BenchmarkMemmove2048(b *testing.B) { bmMemmove(2048, b) }
+func BenchmarkMemmove4096(b *testing.B) { bmMemmove(4096, b) }
diff --git a/libgo/go/runtime/mfinal_test.go b/libgo/go/runtime/mfinal_test.go
index de63271..6efef9b 100644
--- a/libgo/go/runtime/mfinal_test.go
+++ b/libgo/go/runtime/mfinal_test.go
@@ -9,8 +9,94 @@ import (
"sync"
"sync/atomic"
"testing"
+ "time"
)
+type Tintptr *int // assignable to *int
+type Tint int // *Tint implements Tinter, interface{}
+
+func (t *Tint) m() {}
+
+type Tinter interface {
+ m()
+}
+
+func TestFinalizerType(t *testing.T) {
+ if runtime.GOARCH != "amd64" {
+ t.Skipf("Skipping on non-amd64 machine")
+ }
+
+ ch := make(chan bool, 10)
+ finalize := func(x *int) {
+ if *x != 97531 {
+ t.Errorf("finalizer %d, want %d", *x, 97531)
+ }
+ ch <- true
+ }
+
+ var finalizerTests = []struct {
+ convert func(*int) interface{}
+ finalizer interface{}
+ }{
+ {func(x *int) interface{} { return x }, func(v *int) { finalize(v) }},
+ {func(x *int) interface{} { return Tintptr(x) }, func(v Tintptr) { finalize(v) }},
+ {func(x *int) interface{} { return Tintptr(x) }, func(v *int) { finalize(v) }},
+ {func(x *int) interface{} { return (*Tint)(x) }, func(v *Tint) { finalize((*int)(v)) }},
+ {func(x *int) interface{} { return (*Tint)(x) }, func(v Tinter) { finalize((*int)(v.(*Tint))) }},
+ }
+
+ for _, tt := range finalizerTests {
+ go func() {
+ v := new(int)
+ *v = 97531
+ runtime.SetFinalizer(tt.convert(v), tt.finalizer)
+ v = nil
+ }()
+ time.Sleep(1 * time.Second)
+ runtime.GC()
+ select {
+ case <-ch:
+ case <-time.After(time.Second * 4):
+ t.Errorf("finalizer for type %T didn't run", tt.finalizer)
+ }
+ }
+}
+
+type bigValue struct {
+ fill uint64
+ it bool
+ up string
+}
+
+func TestFinalizerInterfaceBig(t *testing.T) {
+ if runtime.GOARCH != "amd64" {
+ t.Skipf("Skipping on non-amd64 machine")
+ }
+ ch := make(chan bool)
+ go func() {
+ v := &bigValue{0xDEADBEEFDEADBEEF, true, "It matters not how strait the gate"}
+ old := *v
+ runtime.SetFinalizer(v, func(v interface{}) {
+ i, ok := v.(*bigValue)
+ if !ok {
+ t.Errorf("finalizer called with type %T, want *bigValue", v)
+ }
+ if *i != old {
+ t.Errorf("finalizer called with %+v, want %+v", *i, old)
+ }
+ close(ch)
+ })
+ v = nil
+ }()
+ time.Sleep(1 * time.Second)
+ runtime.GC()
+ select {
+ case <-ch:
+ case <-time.After(4 * time.Second):
+ t.Errorf("finalizer for type *bigValue didn't run")
+ }
+}
+
func fin(v *int) {
}
diff --git a/libgo/go/runtime/norace_test.go b/libgo/go/runtime/norace_test.go
new file mode 100644
index 0000000..a3d5b00
--- /dev/null
+++ b/libgo/go/runtime/norace_test.go
@@ -0,0 +1,58 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests that cannot run under the race detector for some reason.
+// +build !race
+
+package runtime_test
+
+import (
+ "runtime"
+ "sync/atomic"
+ "testing"
+)
+
+// Syscall tests split stack between Entersyscall and Exitsyscall under race detector.
+func BenchmarkSyscall(b *testing.B) {
+ benchmarkSyscall(b, 0, 1)
+}
+
+func BenchmarkSyscallWork(b *testing.B) {
+ benchmarkSyscall(b, 100, 1)
+}
+
+func BenchmarkSyscallExcess(b *testing.B) {
+ benchmarkSyscall(b, 0, 4)
+}
+
+func BenchmarkSyscallExcessWork(b *testing.B) {
+ benchmarkSyscall(b, 100, 4)
+}
+
+func benchmarkSyscall(b *testing.B, work, excess int) {
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(-1) * excess
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ foo := 42
+ for atomic.AddInt32(&N, -1) >= 0 {
+ runtime.Gosched()
+ for g := 0; g < CallsPerSched; g++ {
+ runtime.Entersyscall()
+ for i := 0; i < work; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ runtime.Exitsyscall()
+ }
+ }
+ c <- foo == 42
+ }()
+ }
+ for p := 0; p < procs; p++ {
+ <-c
+ }
+}
diff --git a/libgo/go/runtime/parfor_test.go b/libgo/go/runtime/parfor_test.go
index 4c69a68..de64285 100644
--- a/libgo/go/runtime/parfor_test.go
+++ b/libgo/go/runtime/parfor_test.go
@@ -102,11 +102,6 @@ func TestParForSetup(t *testing.T) {
// Test parallel parallelfor.
func TestParForParallel(t *testing.T) {
- if GOARCH != "amd64" {
- t.Log("temporarily disabled, see http://golang.org/issue/4155")
- return
- }
-
N := uint64(1e7)
if testing.Short() {
N /= 10
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index 32c1098..3b84285 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -20,8 +20,8 @@ import (
"text/tabwriter"
)
-// BUG(rsc): A bug in the OS X Snow Leopard 64-bit kernel prevents
-// CPU profiling from giving accurate results on that system.
+// BUG(rsc): Profiles are incomplete and inaccurate on NetBSD, OpenBSD, and OS X.
+// See http://golang.org/issue/6047 for details.
// A Profile is a collection of stack traces showing the call sequences
// that led to instances of a particular event, such as allocation.
@@ -666,7 +666,7 @@ func writeBlock(w io.Writer, debug int) error {
}
fmt.Fprint(w, "\n")
if debug > 0 {
- printStackRecord(w, r.Stack(), false)
+ printStackRecord(w, r.Stack(), true)
}
}
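The writeBlock change means debug>0 output now includes fully symbolized stacks for each contention record; producing such a profile looks like this (a sketch, not part of the patch):

    package main

    import (
    	"os"
    	"runtime"
    	"runtime/pprof"
    )

    func main() {
    	runtime.SetBlockProfileRate(1) // sample every blocking event
    	// ... run code that contends on channels or mutexes ...
    	// debug=1 prints one entry per record, now with full stacks.
    	pprof.Lookup("block").WriteTo(os.Stdout, 1)
    }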
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index 5762e17..bdbbf42 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -9,57 +9,66 @@ import (
"fmt"
"hash/crc32"
"os/exec"
+ "regexp"
"runtime"
. "runtime/pprof"
"strings"
+ "sync"
"testing"
+ "time"
"unsafe"
)
func TestCPUProfile(t *testing.T) {
- switch runtime.GOOS {
- case "darwin":
- out, err := exec.Command("uname", "-a").CombinedOutput()
- if err != nil {
- t.Fatal(err)
- }
- vers := string(out)
- t.Logf("uname -a: %v", vers)
- // Lion uses "Darwin Kernel Version 11".
- if strings.Contains(vers, "Darwin Kernel Version 10") && strings.Contains(vers, "RELEASE_X86_64") {
- t.Skip("skipping test on known-broken kernel (64-bit Leopard / Snow Leopard)")
+ buf := make([]byte, 100000)
+ testCPUProfile(t, []string{"crc32.update"}, func() {
+ // This loop takes about a quarter second on a 2 GHz laptop.
+ // We only need to get one 100 Hz clock tick, so we've got
+ // a 25x safety buffer.
+ for i := 0; i < 1000; i++ {
+ crc32.ChecksumIEEE(buf)
}
- case "plan9":
- // unimplemented
- return
- }
+ })
+}
+func TestCPUProfileMultithreaded(t *testing.T) {
buf := make([]byte, 100000)
- var prof bytes.Buffer
- if err := StartCPUProfile(&prof); err != nil {
- t.Fatal(err)
- }
- // This loop takes about a quarter second on a 2 GHz laptop.
- // We only need to get one 100 Hz clock tick, so we've got
- // a 25x safety buffer.
- for i := 0; i < 1000; i++ {
- crc32.ChecksumIEEE(buf)
- }
- StopCPUProfile()
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
+ testCPUProfile(t, []string{"crc32.update"}, func() {
+ c := make(chan int)
+ go func() {
+ for i := 0; i < 2000; i++ {
+ crc32.Update(0, crc32.IEEETable, buf)
+ }
+ c <- 1
+ }()
+ // This loop takes about a quarter second on a 2 GHz laptop.
+ // We only need to get one 100 Hz clock tick, so we've got
+ // a 25x safety buffer.
+ for i := 0; i < 2000; i++ {
+ crc32.ChecksumIEEE(buf)
+ }
+ <-c
+ })
+}
+func parseProfile(t *testing.T, bytes []byte, f func(uintptr, []uintptr)) {
// Convert []byte to []uintptr.
- bytes := prof.Bytes()
l := len(bytes) / int(unsafe.Sizeof(uintptr(0)))
val := *(*[]uintptr)(unsafe.Pointer(&bytes))
val = val[:l]
- if l < 13 {
- t.Fatalf("profile too short: %#x", val)
+ // 5 for the header, 2 for the per-sample header on at least one sample, 3 for the trailer.
+ if l < 5+2+3 {
+ t.Logf("profile too short: %#x", val)
+ if badOS[runtime.GOOS] {
+ t.Skipf("ignoring failure on %s; see golang.org/issue/6047", runtime.GOOS)
+ return
+ }
+ t.FailNow()
}
- fmt.Println(val, l)
hd, val, tl := val[:5], val[5:l-3], val[l-3:]
- fmt.Println(hd, val, tl)
if hd[0] != 0 || hd[1] != 3 || hd[2] != 0 || hd[3] != 1e6/100 || hd[4] != 0 {
t.Fatalf("unexpected header %#x", hd)
}
@@ -68,26 +77,300 @@ func TestCPUProfile(t *testing.T) {
t.Fatalf("malformed end-of-data marker %#x", tl)
}
- // Check that profile is well formed and contains ChecksumIEEE.
- found := false
for len(val) > 0 {
if len(val) < 2 || val[0] < 1 || val[1] < 1 || uintptr(len(val)) < 2+val[1] {
t.Fatalf("malformed profile. leftover: %#x", val)
}
- for _, pc := range val[2 : 2+val[1]] {
+ f(val[0], val[2:2+val[1]])
+ val = val[2+val[1]:]
+ }
+}
+
+func testCPUProfile(t *testing.T, need []string, f func()) {
+ switch runtime.GOOS {
+ case "darwin":
+ out, err := exec.Command("uname", "-a").CombinedOutput()
+ if err != nil {
+ t.Fatal(err)
+ }
+ vers := string(out)
+ t.Logf("uname -a: %v", vers)
+ case "plan9":
+ // unimplemented
+ return
+ }
+
+ var prof bytes.Buffer
+ if err := StartCPUProfile(&prof); err != nil {
+ t.Fatal(err)
+ }
+ f()
+ StopCPUProfile()
+
+ // Check that profile is well formed and contains ChecksumIEEE.
+ have := make([]uintptr, len(need))
+ parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) {
+ for _, pc := range stk {
f := runtime.FuncForPC(pc)
if f == nil {
continue
}
- if strings.Contains(f.Name(), "ChecksumIEEE") ||
- (strings.Contains(f.Name(), "update") && strings.Contains(f.Name(), "crc32")) {
- found = true
+ for i, name := range need {
+ if strings.Contains(f.Name(), name) {
+ have[i] += count
+ }
}
}
- val = val[2+val[1]:]
+ })
+
+ var total uintptr
+ for i, name := range need {
+ total += have[i]
+ t.Logf("%s: %d\n", name, have[i])
+ }
+ ok := true
+ if total == 0 {
+ t.Logf("no CPU profile samples collected")
+ ok = false
+ }
+ min := total / uintptr(len(have)) / 3
+ for i, name := range need {
+ if have[i] < min {
+ t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
+ ok = false
+ }
+ }
+
+ if !ok {
+ if badOS[runtime.GOOS] {
+ t.Skipf("ignoring failure on %s; see golang.org/issue/6047", runtime.GOOS)
+ return
+ }
+ t.FailNow()
+ }
+}
+
+func TestCPUProfileWithFork(t *testing.T) {
+ // Fork can hang if preempted with signals frequently enough (see issue 5517).
+ // Ensure that we do not do this.
+ heap := 1 << 30
+ if testing.Short() {
+ heap = 100 << 20
}
+ // This makes fork slower.
+ garbage := make([]byte, heap)
+ // Need to touch the slice, otherwise it won't be paged in.
+ done := make(chan bool)
+ go func() {
+ for i := range garbage {
+ garbage[i] = 42
+ }
+ done <- true
+ }()
+ <-done
- if !found {
- t.Fatal("did not find ChecksumIEEE in the profile")
+ var prof bytes.Buffer
+ if err := StartCPUProfile(&prof); err != nil {
+ t.Fatal(err)
}
+ defer StopCPUProfile()
+
+ for i := 0; i < 10; i++ {
+ exec.Command("go").CombinedOutput()
+ }
+}
+
+// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
+// If it did, it would see inconsistent state and would either record an incorrect stack
+// or crash because the stack was malformed.
+func TestGoroutineSwitch(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("flaky test; see http://golang.org/issue/6417")
+ }
+	// How much to try. These defaults take about 1 second
+ // on a 2012 MacBook Pro. The ones in short mode take
+ // about 0.1 seconds.
+ tries := 10
+ count := 1000000
+ if testing.Short() {
+ tries = 1
+ }
+ for try := 0; try < tries; try++ {
+ var prof bytes.Buffer
+ if err := StartCPUProfile(&prof); err != nil {
+ t.Fatal(err)
+ }
+ for i := 0; i < count; i++ {
+ runtime.Gosched()
+ }
+ StopCPUProfile()
+
+ // Read profile to look for entries for runtime.gogo with an attempt at a traceback.
+ // The special entry
+ parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) {
+ // An entry with two frames with 'System' in its top frame
+ // exists to record a PC without a traceback. Those are okay.
+ if len(stk) == 2 {
+ f := runtime.FuncForPC(stk[1])
+ if f != nil && f.Name() == "System" {
+ return
+ }
+ }
+
+ // Otherwise, should not see runtime.gogo.
+ // The place we'd see it would be the inner most frame.
+ f := runtime.FuncForPC(stk[0])
+ if f != nil && f.Name() == "runtime.gogo" {
+ var buf bytes.Buffer
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ fmt.Fprintf(&buf, "%#x ?:0\n", pc)
+ } else {
+ file, line := f.FileLine(pc)
+ fmt.Fprintf(&buf, "%#x %s:%d\n", pc, file, line)
+ }
+ }
+ t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
+ }
+ })
+ }
+}
+
+// Operating systems that are expected to fail the tests. See issue 6047.
+var badOS = map[string]bool{
+ "darwin": true,
+ "netbsd": true,
+ "openbsd": true,
+}
+
+func TestBlockProfile(t *testing.T) {
+ t.Skip("lots of details are different for gccgo; FIXME")
+ type TestCase struct {
+ name string
+ f func()
+ re string
+ }
+ tests := [...]TestCase{
+ {"chan recv", blockChanRecv, `
+[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+`},
+ {"chan send", blockChanSend, `
+[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ runtime\.chansend1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+`},
+ {"chan close", blockChanClose, `
+[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ runtime\.chanrecv1\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+`},
+ {"select recv async", blockSelectRecvAsync, `
+[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+`},
+ {"select send sync", blockSelectSendSync, `
+[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ runtime\.selectgo\+0x[0-9,a-f]+ .*/src/pkg/runtime/chan.c:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+`},
+ {"mutex", blockMutex, `
+[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
+# 0x[0-9,a-f]+ sync\.\(\*Mutex\)\.Lock\+0x[0-9,a-f]+ .*/src/pkg/sync/mutex\.go:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.blockMutex\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+# 0x[0-9,a-f]+ runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+ .*/src/pkg/runtime/pprof/pprof_test.go:[0-9]+
+`},
+ }
+
+ runtime.SetBlockProfileRate(1)
+ defer runtime.SetBlockProfileRate(0)
+ for _, test := range tests {
+ test.f()
+ }
+ var w bytes.Buffer
+ Lookup("block").WriteTo(&w, 1)
+ prof := w.String()
+
+ if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
+ t.Fatalf("Bad profile header:\n%v", prof)
+ }
+
+ for _, test := range tests {
+ if !regexp.MustCompile(test.re).MatchString(prof) {
+ t.Fatalf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
+ }
+ }
+}
+
+const blockDelay = 10 * time.Millisecond
+
+func blockChanRecv() {
+ c := make(chan bool)
+ go func() {
+ time.Sleep(blockDelay)
+ c <- true
+ }()
+ <-c
+}
+
+func blockChanSend() {
+ c := make(chan bool)
+ go func() {
+ time.Sleep(blockDelay)
+ <-c
+ }()
+ c <- true
+}
+
+func blockChanClose() {
+ c := make(chan bool)
+ go func() {
+ time.Sleep(blockDelay)
+ close(c)
+ }()
+ <-c
+}
+
+func blockSelectRecvAsync() {
+ c := make(chan bool, 1)
+ c2 := make(chan bool, 1)
+ go func() {
+ time.Sleep(blockDelay)
+ c <- true
+ }()
+ select {
+ case <-c:
+ case <-c2:
+ }
+}
+
+func blockSelectSendSync() {
+ c := make(chan bool)
+ c2 := make(chan bool)
+ go func() {
+ time.Sleep(blockDelay)
+ <-c
+ }()
+ select {
+ case c <- true:
+ case c2 <- true:
+ }
+}
+
+func blockMutex() {
+ var mu sync.Mutex
+ mu.Lock()
+ go func() {
+ time.Sleep(blockDelay)
+ mu.Unlock()
+ }()
+ mu.Lock()
}
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index 21fb9c2..29f71e7 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -8,6 +8,7 @@ import (
"math"
"runtime"
"sync/atomic"
+ "syscall"
"testing"
"time"
)
@@ -92,6 +93,35 @@ func TestYieldLocked(t *testing.T) {
<-c
}
+func TestGoroutineParallelism(t *testing.T) {
+ P := 4
+ N := 10
+ if testing.Short() {
+ P = 3
+ N = 3
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
+ for try := 0; try < N; try++ {
+ done := make(chan bool)
+ x := uint32(0)
+ for p := 0; p < P; p++ {
+ // Test that all P goroutines are scheduled at the same time
+ go func(p int) {
+ for i := 0; i < 3; i++ {
+ expected := uint32(P*i + p)
+ for atomic.LoadUint32(&x) != expected {
+ }
+ atomic.StoreUint32(&x, expected+1)
+ }
+ done <- true
+ }(p)
+ }
+ for p := 0; p < P; p++ {
+ <-done
+ }
+ }
+}
+
func TestBlockLocked(t *testing.T) {
const N = 10
c := make(chan bool)
@@ -107,86 +137,212 @@ func TestBlockLocked(t *testing.T) {
}
}
-func stackGrowthRecursive(i int) {
- var pad [128]uint64
- if i != 0 && pad[0] == 0 {
- stackGrowthRecursive(i - 1)
+func TestTimerFairness(t *testing.T) {
+ done := make(chan bool)
+ c := make(chan bool)
+ for i := 0; i < 2; i++ {
+ go func() {
+ for {
+ select {
+ case c <- true:
+ case <-done:
+ return
+ }
+ }
+ }()
+ }
+
+ timer := time.After(20 * time.Millisecond)
+ for {
+ select {
+ case <-c:
+ case <-timer:
+ close(done)
+ return
+ }
}
}
-func TestSchedLocalQueue(t *testing.T) {
- runtime.TestSchedLocalQueue1()
+func TestTimerFairness2(t *testing.T) {
+ done := make(chan bool)
+ c := make(chan bool)
+ for i := 0; i < 2; i++ {
+ go func() {
+ timer := time.After(20 * time.Millisecond)
+ var buf [1]byte
+ for {
+ syscall.Read(0, buf[0:0])
+ select {
+ case c <- true:
+ case <-c:
+ case <-timer:
+ done <- true
+ return
+ }
+ }
+ }()
+ }
+ <-done
+ <-done
}
-func TestSchedLocalQueueSteal(t *testing.T) {
- runtime.TestSchedLocalQueueSteal1()
+// The function is used to test preemption at split stack checks.
+// Declaring a var avoids inlining at the call site.
+var preempt = func() int {
+ var a [128]int
+ sum := 0
+ for _, v := range a {
+ sum += v
+ }
+ return sum
}
-func benchmarkStackGrowth(b *testing.B, rec int) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- runtime.Gosched()
- for g := 0; g < CallsPerSched; g++ {
- stackGrowthRecursive(rec)
+func TestPreemption(t *testing.T) {
+ t.Skip("gccgo does not implement preemption")
+ // Test that goroutines are preempted at function calls.
+ N := 5
+ if testing.Short() {
+ N = 2
+ }
+ c := make(chan bool)
+ var x uint32
+ for g := 0; g < 2; g++ {
+ go func(g int) {
+ for i := 0; i < N; i++ {
+ for atomic.LoadUint32(&x) != uint32(g) {
+ preempt()
}
+ atomic.StoreUint32(&x, uint32(1-g))
}
c <- true
+ }(g)
+ }
+ <-c
+ <-c
+}
+
+func TestPreemptionGC(t *testing.T) {
+ t.Skip("gccgo does not implement preemption")
+ // Test that pending GC preempts running goroutines.
+ P := 5
+ N := 10
+ if testing.Short() {
+ P = 3
+ N = 2
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P + 1))
+ var stop uint32
+ for i := 0; i < P; i++ {
+ go func() {
+ for atomic.LoadUint32(&stop) == 0 {
+ preempt()
+ }
}()
}
- for p := 0; p < procs; p++ {
- <-c
+ for i := 0; i < N; i++ {
+ runtime.Gosched()
+ runtime.GC()
}
+ atomic.StoreUint32(&stop, 1)
}
-func BenchmarkStackGrowth(b *testing.B) {
- benchmarkStackGrowth(b, 10)
+func stackGrowthRecursive(i int) {
+ var pad [128]uint64
+ if i != 0 && pad[0] == 0 {
+ stackGrowthRecursive(i - 1)
+ }
}
-func BenchmarkStackGrowthDeep(b *testing.B) {
- benchmarkStackGrowth(b, 1024)
+func TestPreemptSplitBig(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping in -short mode")
+ }
+ t.Skip("gccgo does not implement preemption")
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
+ stop := make(chan int)
+ go big(stop)
+ for i := 0; i < 3; i++ {
+ time.Sleep(10 * time.Microsecond) // let big start running
+ runtime.GC()
+ }
+ close(stop)
+}
+
+func big(stop chan int) int {
+ n := 0
+ for {
+ // delay so that gc is sure to have asked for a preemption
+ for i := 0; i < 1e9; i++ {
+ n++
+ }
+
+ // call bigframe, which used to miss the preemption in its prologue.
+ bigframe(stop)
+
+ // check if we've been asked to stop.
+ select {
+ case <-stop:
+ return n
+ }
+ }
+}
+
+func bigframe(stop chan int) int {
+ // not splitting the stack will overflow.
+ // small will notice that it needs a stack split and will
+ // catch the overflow.
+ var x [8192]byte
+ return small(stop, &x)
}
-func BenchmarkSyscall(b *testing.B) {
- benchmarkSyscall(b, 0, 1)
+func small(stop chan int, x *[8192]byte) int {
+ for i := range x {
+ x[i] = byte(i)
+ }
+ sum := 0
+ for i := range x {
+ sum += int(x[i])
+ }
+
+ // keep small from being a leaf function, which might
+ // make it not do any stack check at all.
+ nonleaf(stop)
+
+ return sum
}
-func BenchmarkSyscallWork(b *testing.B) {
- benchmarkSyscall(b, 100, 1)
+func nonleaf(stop chan int) bool {
+ // do something that won't be inlined:
+ select {
+ case <-stop:
+ return true
+ default:
+ return false
+ }
}
-func BenchmarkSyscallExcess(b *testing.B) {
- benchmarkSyscall(b, 0, 4)
+func TestSchedLocalQueue(t *testing.T) {
+ runtime.TestSchedLocalQueue1()
}
-func BenchmarkSyscallExcessWork(b *testing.B) {
- benchmarkSyscall(b, 100, 4)
+func TestSchedLocalQueueSteal(t *testing.T) {
+ runtime.TestSchedLocalQueueSteal1()
}
-func benchmarkSyscall(b *testing.B, work, excess int) {
+func benchmarkStackGrowth(b *testing.B, rec int) {
const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1) * excess
+ procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, procs)
for p := 0; p < procs; p++ {
go func() {
- foo := 42
for atomic.AddInt32(&N, -1) >= 0 {
runtime.Gosched()
for g := 0; g < CallsPerSched; g++ {
- runtime.Entersyscall()
- for i := 0; i < work; i++ {
- foo *= 2
- foo /= 2
- }
- runtime.Exitsyscall()
+ stackGrowthRecursive(rec)
}
}
- c <- foo == 42
+ c <- true
}()
}
for p := 0; p < procs; p++ {
@@ -194,6 +350,14 @@ func benchmarkSyscall(b *testing.B, work, excess int) {
}
}
+func BenchmarkStackGrowth(b *testing.B) {
+ benchmarkStackGrowth(b, 10)
+}
+
+func BenchmarkStackGrowthDeep(b *testing.B) {
+ benchmarkStackGrowth(b, 1024)
+}
+
func BenchmarkCreateGoroutines(b *testing.B) {
benchmarkCreateGoroutines(b, 1)
}
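Several of the new tests use the one-line pattern defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P)): since GOMAXPROCS returns the previous setting, the inner call installs P and the deferred outer call restores the old value when the test returns (illustrative):

    package runtime_example

    import (
    	"runtime"
    	"testing"
    )

    func TestWithFourProcs(t *testing.T) {
    	// Set GOMAXPROCS to 4 for the duration of the test,
    	// restoring the caller's value on return.
    	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
    	// ... test body ...
    }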
diff --git a/libgo/go/runtime/runtime_test.go b/libgo/go/runtime/runtime_test.go
index e458793..d121929 100644
--- a/libgo/go/runtime/runtime_test.go
+++ b/libgo/go/runtime/runtime_test.go
@@ -6,6 +6,12 @@ package runtime_test
import (
"io"
+ // "io/ioutil"
+ // "os"
+ // "os/exec"
+ // . "runtime"
+ // "strconv"
+ // "strings"
"testing"
)
@@ -79,3 +85,43 @@ func BenchmarkDeferMany(b *testing.B) {
}(1, 2, 3)
}
}
+
+/* The go tool is not present in gccgo.
+
+// The profiling signal handler needs to know whether it is executing runtime.gogo.
+// The constant RuntimeGogoBytes in arch_*.h gives the size of the function;
+// we don't have a way to obtain it from the linker (perhaps someday).
+// Test that the constant matches the size determined by 'go tool nm -S'.
+// The value reported will include the padding between runtime.gogo and the
+// next function in memory. That's fine.
+func TestRuntimeGogoBytes(t *testing.T) {
+ dir, err := ioutil.TempDir("", "go-build")
+ if err != nil {
+ t.Fatalf("failed to create temp directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ out, err := exec.Command("go", "build", "-o", dir+"/hello", "../../../test/helloworld.go").CombinedOutput()
+ if err != nil {
+ t.Fatalf("building hello world: %v\n%s", err, out)
+ }
+
+ out, err = exec.Command("go", "tool", "nm", "-S", dir+"/hello").CombinedOutput()
+ if err != nil {
+ t.Fatalf("go tool nm: %v\n%s", err, out)
+ }
+
+ for _, line := range strings.Split(string(out), "\n") {
+ f := strings.Fields(line)
+ if len(f) == 4 && f[3] == "runtime.gogo" {
+ size, _ := strconv.Atoi(f[1])
+ if GogoBytes() != int32(size) {
+ t.Fatalf("RuntimeGogoBytes = %d, should be %d", GogoBytes(), size)
+ }
+ return
+ }
+ }
+
+ t.Fatalf("go tool nm did not report size for runtime.gogo")
+}
+*/