author    Ian Lance Taylor <ian@gcc.gnu.org>  2014-06-06 22:37:27 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2014-06-06 22:37:27 +0000
commit    6736ef96eab222e58e6294f42be981a5afb59811 (patch)
tree      2bc668fae9bf96f9a3988e0b0a16685bde8c4f0b /libgo/go/runtime
parent    38a138411da4206c53f9a153ee9c3624fce58a52 (diff)
libgo: Merge to master revision 19184.

The next revision, 19185, renames several runtime files, and will be
handled in a separate change.

From-SVN: r211328
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/append_test.go       |  19
-rw-r--r--  libgo/go/runtime/chan_test.go         | 625
-rw-r--r--  libgo/go/runtime/debug/garbage.go     |   4
-rw-r--r--  libgo/go/runtime/debug/stack.go       |   6
-rw-r--r--  libgo/go/runtime/error.go             |   7
-rw-r--r--  libgo/go/runtime/export_test.go       |   4
-rw-r--r--  libgo/go/runtime/lfstack_test.go      |  12
-rw-r--r--  libgo/go/runtime/map_test.go          |   3
-rw-r--r--  libgo/go/runtime/mem.go               |   7
-rw-r--r--  libgo/go/runtime/memmove_test.go      |  99
-rw-r--r--  libgo/go/runtime/pprof/pprof.go       |   2
-rw-r--r--  libgo/go/runtime/pprof/pprof_test.go  |  25
-rw-r--r--  libgo/go/runtime/proc_test.go         |  43
13 files changed, 664 insertions(+), 192 deletions(-)
diff --git a/libgo/go/runtime/append_test.go b/libgo/go/runtime/append_test.go
index 937c825..a67dc9b 100644
--- a/libgo/go/runtime/append_test.go
+++ b/libgo/go/runtime/append_test.go
@@ -19,6 +19,25 @@ func BenchmarkAppend(b *testing.B) {
}
}
+func BenchmarkAppendGrowByte(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var x []byte
+ for j := 0; j < 1<<20; j++ {
+ x = append(x, byte(j))
+ }
+ }
+}
+
+func BenchmarkAppendGrowString(b *testing.B) {
+ var s string
+ for i := 0; i < b.N; i++ {
+ var x []string
+ for j := 0; j < 1<<20; j++ {
+ x = append(x, s)
+ }
+ }
+}
+
func benchmarkAppendBytes(b *testing.B, length int) {
b.StopTimer()
x := make([]byte, 0, N)
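
Note: the two benchmarks added above grow a slice one element at a time from nil, so they measure append's amortized grow-and-copy path. For contrast, a caller that knows the final length can preallocate with make and skip regrowth entirely; a minimal sketch, not part of this commit (the benchmark name is invented):

package runtime_test

import "testing"

// Hypothetical counterpart to BenchmarkAppendGrowByte: giving make()
// the final capacity up front removes the repeated reallocations that
// the benchmarks above are designed to measure.
func BenchmarkAppendPreallocByte(b *testing.B) {
	for i := 0; i < b.N; i++ {
		x := make([]byte, 0, 1<<20)
		for j := 0; j < 1<<20; j++ {
			x = append(x, byte(j))
		}
	}
}
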
diff --git a/libgo/go/runtime/chan_test.go b/libgo/go/runtime/chan_test.go
index eb2c7c6..782176c 100644
--- a/libgo/go/runtime/chan_test.go
+++ b/libgo/go/runtime/chan_test.go
@@ -9,8 +9,327 @@ import (
"sync"
"sync/atomic"
"testing"
+ "time"
)
+func TestChan(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := 200
+ if testing.Short() {
+ N = 20
+ }
+ for chanCap := 0; chanCap < N; chanCap++ {
+ {
+ // Ensure that receive from empty chan blocks.
+ c := make(chan int, chanCap)
+ recv1 := false
+ go func() {
+ _ = <-c
+ recv1 = true
+ }()
+ recv2 := false
+ go func() {
+ _, _ = <-c
+ recv2 = true
+ }()
+ time.Sleep(time.Millisecond)
+ if recv1 || recv2 {
+ t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+ }
+ // Ensure that non-blocking receive does not block.
+ select {
+ case _ = <-c:
+ t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+ default:
+ }
+ select {
+ case _, _ = <-c:
+ t.Fatalf("chan[%d]: receive from empty chan", chanCap)
+ default:
+ }
+ c <- 0
+ c <- 0
+ }
+
+ {
+ // Ensure that send to full chan blocks.
+ c := make(chan int, chanCap)
+ for i := 0; i < chanCap; i++ {
+ c <- i
+ }
+ sent := uint32(0)
+ go func() {
+ c <- 0
+ atomic.StoreUint32(&sent, 1)
+ }()
+ time.Sleep(time.Millisecond)
+ if atomic.LoadUint32(&sent) != 0 {
+ t.Fatalf("chan[%d]: send to full chan", chanCap)
+ }
+ // Ensure that non-blocking send does not block.
+ select {
+ case c <- 0:
+ t.Fatalf("chan[%d]: send to full chan", chanCap)
+ default:
+ }
+ <-c
+ }
+
+ {
+ // Ensure that we receive 0 from closed chan.
+ c := make(chan int, chanCap)
+ for i := 0; i < chanCap; i++ {
+ c <- i
+ }
+ close(c)
+ for i := 0; i < chanCap; i++ {
+ v := <-c
+ if v != i {
+ t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
+ }
+ }
+ if v := <-c; v != 0 {
+ t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
+ }
+ if v, ok := <-c; v != 0 || ok {
+ t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
+ }
+ }
+
+ {
+ // Ensure that close unblocks receive.
+ c := make(chan int, chanCap)
+ done := make(chan bool)
+ go func() {
+ v, ok := <-c
+ done <- v == 0 && ok == false
+ }()
+ time.Sleep(time.Millisecond)
+ close(c)
+ if !<-done {
+ t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
+ }
+ }
+
+ {
+ // Send 100 integers,
+ // ensure that we receive them non-corrupted in FIFO order.
+ c := make(chan int, chanCap)
+ go func() {
+ for i := 0; i < 100; i++ {
+ c <- i
+ }
+ }()
+ for i := 0; i < 100; i++ {
+ v := <-c
+ if v != i {
+ t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
+ }
+ }
+
+ // Same, but using recv2.
+ go func() {
+ for i := 0; i < 100; i++ {
+ c <- i
+ }
+ }()
+ for i := 0; i < 100; i++ {
+ v, ok := <-c
+ if !ok {
+ t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
+ }
+ if v != i {
+ t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
+ }
+ }
+
+ // Send 1000 integers in 4 goroutines,
+ // ensure that we receive what we send.
+ const P = 4
+ const L = 1000
+ for p := 0; p < P; p++ {
+ go func() {
+ for i := 0; i < L; i++ {
+ c <- i
+ }
+ }()
+ }
+ done := make(chan map[int]int)
+ for p := 0; p < P; p++ {
+ go func() {
+ recv := make(map[int]int)
+ for i := 0; i < L; i++ {
+ v := <-c
+ recv[v] = recv[v] + 1
+ }
+ done <- recv
+ }()
+ }
+ recv := make(map[int]int)
+ for p := 0; p < P; p++ {
+ for k, v := range <-done {
+ recv[k] = recv[k] + v
+ }
+ }
+ if len(recv) != L {
+ t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
+ }
+ for _, v := range recv {
+ if v != P {
+ t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
+ }
+ }
+ }
+
+ {
+ // Test len/cap.
+ c := make(chan int, chanCap)
+ if len(c) != 0 || cap(c) != chanCap {
+ t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
+ }
+ for i := 0; i < chanCap; i++ {
+ c <- i
+ }
+ if len(c) != chanCap || cap(c) != chanCap {
+ t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
+ }
+ }
+
+ }
+}
+
+func TestSelfSelect(t *testing.T) {
+ // Ensure that send/recv on the same chan in select
+ // does not crash nor deadlock.
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
+ for _, chanCap := range []int{0, 10} {
+ var wg sync.WaitGroup
+ wg.Add(2)
+ c := make(chan int, chanCap)
+ for p := 0; p < 2; p++ {
+ p := p
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 1000; i++ {
+ if p == 0 || i%2 == 0 {
+ select {
+ case c <- p:
+ case v := <-c:
+ if chanCap == 0 && v == p {
+ t.Fatalf("self receive")
+ }
+ }
+ } else {
+ select {
+ case v := <-c:
+ if chanCap == 0 && v == p {
+ t.Fatalf("self receive")
+ }
+ case c <- p:
+ }
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ }
+}
+
+func TestSelectStress(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
+ var c [4]chan int
+ c[0] = make(chan int)
+ c[1] = make(chan int)
+ c[2] = make(chan int, 2)
+ c[3] = make(chan int, 3)
+ N := int(1e5)
+ if testing.Short() {
+ N /= 10
+ }
+ // There are 4 goroutines that send N values on each of the chans,
+ // + 4 goroutines that receive N values on each of the chans,
+ // + 1 goroutine that sends N values on each of the chans in a single select,
+ // + 1 goroutine that receives N values on each of the chans in a single select.
+ // All these sends, receives and selects interact chaotically at runtime,
+ // but we are careful that this whole construct does not deadlock.
+ var wg sync.WaitGroup
+ wg.Add(10)
+ for k := 0; k < 4; k++ {
+ k := k
+ go func() {
+ for i := 0; i < N; i++ {
+ c[k] <- 0
+ }
+ wg.Done()
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ <-c[k]
+ }
+ wg.Done()
+ }()
+ }
+ go func() {
+ var n [4]int
+ c1 := c
+ for i := 0; i < 4*N; i++ {
+ select {
+ case c1[3] <- 0:
+ n[3]++
+ if n[3] == N {
+ c1[3] = nil
+ }
+ case c1[2] <- 0:
+ n[2]++
+ if n[2] == N {
+ c1[2] = nil
+ }
+ case c1[0] <- 0:
+ n[0]++
+ if n[0] == N {
+ c1[0] = nil
+ }
+ case c1[1] <- 0:
+ n[1]++
+ if n[1] == N {
+ c1[1] = nil
+ }
+ }
+ }
+ wg.Done()
+ }()
+ go func() {
+ var n [4]int
+ c1 := c
+ for i := 0; i < 4*N; i++ {
+ select {
+ case <-c1[0]:
+ n[0]++
+ if n[0] == N {
+ c1[0] = nil
+ }
+ case <-c1[1]:
+ n[1]++
+ if n[1] == N {
+ c1[1] = nil
+ }
+ case <-c1[2]:
+ n[2]++
+ if n[2] == N {
+ c1[2] = nil
+ }
+ case <-c1[3]:
+ n[3]++
+ if n[3] == N {
+ c1[3] = nil
+ }
+ }
+ }
+ wg.Done()
+ }()
+ wg.Wait()
+}
+
func TestChanSendInterface(t *testing.T) {
type mt struct{}
m := &mt{}
@@ -29,34 +348,35 @@ func TestChanSendInterface(t *testing.T) {
func TestPseudoRandomSend(t *testing.T) {
n := 100
- c := make(chan int)
- l := make([]int, n)
- var m sync.Mutex
- m.Lock()
- go func() {
+ for _, chanCap := range []int{0, n} {
+ c := make(chan int, chanCap)
+ l := make([]int, n)
+ var m sync.Mutex
+ m.Lock()
+ go func() {
+ for i := 0; i < n; i++ {
+ runtime.Gosched()
+ l[i] = <-c
+ }
+ m.Unlock()
+ }()
for i := 0; i < n; i++ {
- runtime.Gosched()
- l[i] = <-c
+ select {
+ case c <- 1:
+ case c <- 0:
+ }
}
- m.Unlock()
- }()
- for i := 0; i < n; i++ {
- select {
- case c <- 0:
- case c <- 1:
+ m.Lock() // wait
+ n0 := 0
+ n1 := 0
+ for _, i := range l {
+ n0 += (i + 1) % 2
+ n1 += i
}
- }
- m.Lock() // wait
- n0 := 0
- n1 := 0
- for _, i := range l {
- n0 += (i + 1) % 2
- n1 += i
- if n0 > n/10 && n1 > n/10 {
- return
+ if n0 <= n/10 || n1 <= n/10 {
+ t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
}
}
- t.Errorf("Want pseudo random, got %d zeros and %d ones", n0, n1)
}
func TestMultiConsumer(t *testing.T) {
@@ -110,23 +430,19 @@ func TestMultiConsumer(t *testing.T) {
}
}
-func BenchmarkSelectUncontended(b *testing.B) {
+func BenchmarkChanNonblocking(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, procs)
+ myc := make(chan int)
for p := 0; p < procs; p++ {
go func() {
- myc1 := make(chan int, 1)
- myc2 := make(chan int, 1)
- myc1 <- 0
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
select {
- case <-myc1:
- myc2 <- 0
- case <-myc2:
- myc1 <- 0
+ case <-myc:
+ default:
}
}
}
@@ -138,119 +454,94 @@ func BenchmarkSelectUncontended(b *testing.B) {
}
}
+func BenchmarkSelectUncontended(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ myc1 := make(chan int, 1)
+ myc2 := make(chan int, 1)
+ myc1 <- 0
+ for pb.Next() {
+ select {
+ case <-myc1:
+ myc2 <- 0
+ case <-myc2:
+ myc1 <- 0
+ }
+ }
+ })
+}
+
func BenchmarkSelectContended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
+ procs := runtime.GOMAXPROCS(0)
myc1 := make(chan int, procs)
myc2 := make(chan int, procs)
- for p := 0; p < procs; p++ {
+ b.RunParallel(func(pb *testing.PB) {
myc1 <- 0
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- select {
- case <-myc1:
- myc2 <- 0
- case <-myc2:
- myc1 <- 0
- }
- }
+ for pb.Next() {
+ select {
+ case <-myc1:
+ myc2 <- 0
+ case <-myc2:
+ myc1 <- 0
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ }
+ })
}
func BenchmarkSelectNonblock(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- myc1 := make(chan int)
- myc2 := make(chan int)
- myc3 := make(chan int, 1)
- myc4 := make(chan int, 1)
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- select {
- case <-myc1:
- default:
- }
- select {
- case myc2 <- 0:
- default:
- }
- select {
- case <-myc3:
- default:
- }
- select {
- case myc4 <- 0:
- default:
- }
- }
+ b.RunParallel(func(pb *testing.PB) {
+ myc1 := make(chan int)
+ myc2 := make(chan int)
+ myc3 := make(chan int, 1)
+ myc4 := make(chan int, 1)
+ for pb.Next() {
+ select {
+ case <-myc1:
+ default:
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ select {
+ case myc2 <- 0:
+ default:
+ }
+ select {
+ case <-myc3:
+ default:
+ }
+ select {
+ case myc4 <- 0:
+ default:
+ }
+ }
+ })
}
func BenchmarkChanUncontended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- for p := 0; p < procs; p++ {
- go func() {
- myc := make(chan int, CallsPerSched)
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- myc <- 0
- }
- for g := 0; g < CallsPerSched; g++ {
- <-myc
- }
+ const C = 100
+ b.RunParallel(func(pb *testing.PB) {
+ myc := make(chan int, C)
+ for pb.Next() {
+ for i := 0; i < C; i++ {
+ myc <- 0
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ for i := 0; i < C; i++ {
+ <-myc
+ }
+ }
+ })
}
func BenchmarkChanContended(b *testing.B) {
- const CallsPerSched = 1000
- procs := runtime.GOMAXPROCS(-1)
- N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
- myc := make(chan int, procs*CallsPerSched)
- for p := 0; p < procs; p++ {
- go func() {
- for atomic.AddInt32(&N, -1) >= 0 {
- for g := 0; g < CallsPerSched; g++ {
- myc <- 0
- }
- for g := 0; g < CallsPerSched; g++ {
- <-myc
- }
+ const C = 100
+ myc := make(chan int, C*runtime.GOMAXPROCS(0))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ for i := 0; i < C; i++ {
+ myc <- 0
}
- c <- true
- }()
- }
- for p := 0; p < procs; p++ {
- <-c
- }
+ for i := 0; i < C; i++ {
+ <-myc
+ }
+ }
+ })
}
func BenchmarkChanSync(b *testing.B) {
@@ -350,33 +641,95 @@ func BenchmarkChanProdConsWork100(b *testing.B) {
benchmarkChanProdCons(b, 100, 100)
}
-func BenchmarkChanCreation(b *testing.B) {
+func BenchmarkSelectProdCons(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
- c := make(chan bool, procs)
+ c := make(chan bool, 2*procs)
+ myc := make(chan int, 128)
+ myclose := make(chan bool)
for p := 0; p < procs; p++ {
go func() {
+ // Producer: sends to myc.
+ foo := 0
+ // Intended to not fire during benchmarking.
+ mytimer := time.After(time.Hour)
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
- myc := make(chan int, 1)
- myc <- 0
- <-myc
+ // Model some local work.
+ for i := 0; i < 100; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ select {
+ case myc <- 1:
+ case <-mytimer:
+ case <-myclose:
+ }
}
}
- c <- true
+ myc <- 0
+ c <- foo == 42
+ }()
+ go func() {
+ // Consumer: receives from myc.
+ foo := 0
+ // Intended to not fire during benchmarking.
+ mytimer := time.After(time.Hour)
+ loop:
+ for {
+ select {
+ case v := <-myc:
+ if v == 0 {
+ break loop
+ }
+ case <-mytimer:
+ case <-myclose:
+ }
+ // Model some local work.
+ for i := 0; i < 100; i++ {
+ foo *= 2
+ foo /= 2
+ }
+ }
+ c <- foo == 42
}()
}
for p := 0; p < procs; p++ {
<-c
+ <-c
}
}
+func BenchmarkChanCreation(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ myc := make(chan int, 1)
+ myc <- 0
+ <-myc
+ }
+ })
+}
+
func BenchmarkChanSem(b *testing.B) {
type Empty struct{}
- c := make(chan Empty, 1)
- for i := 0; i < b.N; i++ {
- c <- Empty{}
+ const CallsPerSched = 1000
+ procs := runtime.GOMAXPROCS(0)
+ N := int32(b.N / CallsPerSched)
+ c := make(chan bool, procs)
+ myc := make(chan Empty, procs)
+ for p := 0; p < procs; p++ {
+ go func() {
+ for atomic.AddInt32(&N, -1) >= 0 {
+ for g := 0; g < CallsPerSched; g++ {
+ myc <- Empty{}
+ <-myc
+ }
+ }
+ c <- true
+ }()
+ }
+ for p := 0; p < procs; p++ {
<-c
}
}
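
Note: TestSelectStress above leans on the rule that send and receive on a nil channel block forever, so assigning c1[k] = nil once N values have moved retires that case from the select without restructuring the loop. The same technique in a standalone sketch (function and channel names are illustrative, not from the commit):

package main

import "fmt"

// drainBoth receives from two channels until both are closed. Setting
// a channel variable to nil makes its case block forever, so the
// select can never choose it again.
func drainBoth(a, b <-chan int) {
	for a != nil || b != nil {
		select {
		case v, ok := <-a:
			if !ok {
				a = nil // retire this case
				continue
			}
			fmt.Println("a:", v)
		case v, ok := <-b:
			if !ok {
				b = nil // retire this case
				continue
			}
			fmt.Println("b:", v)
		}
	}
}

func main() {
	a, b := make(chan int, 1), make(chan int, 1)
	a <- 1
	b <- 2
	close(a)
	close(b)
	drainBoth(a, b)
}
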
diff --git a/libgo/go/runtime/debug/garbage.go b/libgo/go/runtime/debug/garbage.go
index 8337d5d..a724fdf 100644
--- a/libgo/go/runtime/debug/garbage.go
+++ b/libgo/go/runtime/debug/garbage.go
@@ -91,7 +91,9 @@ func (x byDuration) Less(i, j int) bool { return x[i] < x[j] }
// at startup, or 100 if the variable is not set.
// A negative percentage disables garbage collection.
func SetGCPercent(percent int) int {
- return setGCPercent(percent)
+ old := setGCPercent(percent)
+ runtime.GC()
+ return old
}
// FreeOSMemory forces a garbage collection followed by an
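
Note: with the change above, SetGCPercent triggers a collection so the new percentage takes effect immediately instead of at the next natural GC. Typical use of the runtime/debug API (the percentage here is only an example):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Halve the collector's heap-growth target. SetGCPercent returns
	// the previous setting, which makes it easy to restore later.
	old := debug.SetGCPercent(50)
	defer debug.SetGCPercent(old)
	fmt.Println("previous GOGC setting:", old)
}
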
diff --git a/libgo/go/runtime/debug/stack.go b/libgo/go/runtime/debug/stack.go
index 2896b21..c29b0a2 100644
--- a/libgo/go/runtime/debug/stack.go
+++ b/libgo/go/runtime/debug/stack.go
@@ -18,6 +18,7 @@ var (
dunno = []byte("???")
centerDot = []byte("·")
dot = []byte(".")
+ slash = []byte("/")
)
// PrintStack prints to standard error the stack trace returned by Stack.
@@ -84,6 +85,11 @@ func function(pc uintptr) []byte {
// runtime/debug.*T·ptrmethod
// and want
// *T.ptrmethod
+ // Since the package path might contains dots (e.g. code.google.com/...),
+ // we first remove the path prefix if there is one.
+ if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 {
+ name = name[lastslash+1:]
+ }
if period := bytes.Index(name, dot); period >= 0 {
name = name[period+1:]
}
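
Note: the stack.go fix strips everything up to the last '/' before searching for the first '.', so an import path that itself contains dots (code.google.com/...) no longer causes the wrong prefix to be cut from the symbol name. The same trimming as a standalone, hypothetical helper:

package main

import (
	"fmt"
	"strings"
)

// trimFuncName reduces a qualified symbol such as
// "code.google.com/p/foo/bar.(*T).Method" to "(*T).Method": first drop
// the slash-separated path prefix (which may contain dots), then drop
// the package name before the first remaining dot.
func trimFuncName(name string) string {
	if i := strings.LastIndex(name, "/"); i >= 0 {
		name = name[i+1:]
	}
	if i := strings.Index(name, "."); i >= 0 {
		name = name[i+1:]
	}
	return name
}

func main() {
	fmt.Println(trimFuncName("code.google.com/p/foo/bar.(*T).Method"))
	// Output: (*T).Method
}
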
diff --git a/libgo/go/runtime/error.go b/libgo/go/runtime/error.go
index 88d5df5..d759a54 100644
--- a/libgo/go/runtime/error.go
+++ b/libgo/go/runtime/error.go
@@ -107,19 +107,20 @@ func NewErrorString(s string, ret *interface{}) {
}
// An errorCString represents a runtime error described by a single C string.
-type errorCString uintptr
+// Not "type errorCString uintptr" because of http://golang.org/issue/7084.
+type errorCString struct{ cstr uintptr }
func (e errorCString) RuntimeError() {}
func cstringToGo(uintptr) string
func (e errorCString) Error() string {
- return "runtime error: " + cstringToGo(uintptr(e))
+ return "runtime error: " + cstringToGo(e.cstr)
}
// For calling from C.
func NewErrorCString(s uintptr, ret *interface{}) {
- *ret = errorCString(s)
+ *ret = errorCString{s}
}
type stringer interface {
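
Note: the errorCString change replaces a named uintptr type with a one-field struct to sidestep http://golang.org/issue/7084. The wrapper has the same size and layout as the bare scalar; only the static type changes. A small sketch of that equivalence (type name invented):

package main

import (
	"fmt"
	"unsafe"
)

// wrapped occupies exactly the space of one uintptr; wrapping changes
// how the type is classified without adding any runtime cost.
type wrapped struct{ ptr uintptr }

func main() {
	var w wrapped
	var u uintptr
	fmt.Println(unsafe.Sizeof(w) == unsafe.Sizeof(u)) // true
}
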
diff --git a/libgo/go/runtime/export_test.go b/libgo/go/runtime/export_test.go
index 2f678b6..436c28d 100644
--- a/libgo/go/runtime/export_test.go
+++ b/libgo/go/runtime/export_test.go
@@ -82,3 +82,7 @@ var TestSchedLocalQueueSteal1 = testSchedLocalQueueSteal
var hashLoad float64 // declared in hashmap.c
var HashLoad = &hashLoad
+
+func memclrBytes(b []byte)
+
+var MemclrBytes = memclrBytes
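
Note: export_test.go is the standard idiom for testing unexported internals: a *_test.go file inside the package re-exports private names (here memclrBytes as MemclrBytes), and the external test package uses the aliases. The idiom for an ordinary package, sketched with hypothetical names (three files in one package directory; the second and third are shown as comments so the block stays a single compilable file):

// ---- clamp.go ----
package mypkg

func clamp(x, lo, hi int) int {
	if x < lo {
		return lo
	}
	if x > hi {
		return hi
	}
	return x
}

// ---- export_test.go ----
// Compiled only by "go test", so the alias never leaks to ordinary
// importers of mypkg:
//
//	package mypkg
//
//	var Clamp = clamp

// ---- clamp_test.go ----
// The external test package reaches the internal through the alias:
//
//	package mypkg_test
//
//	import (
//		"testing"
//
//		"example.com/mypkg" // hypothetical module path
//	)
//
//	func TestClamp(t *testing.T) {
//		if got := mypkg.Clamp(5, 0, 3); got != 3 {
//			t.Fatalf("clamp: got %d, want 3", got)
//		}
//	}
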
diff --git a/libgo/go/runtime/lfstack_test.go b/libgo/go/runtime/lfstack_test.go
index 505aae6..e518777 100644
--- a/libgo/go/runtime/lfstack_test.go
+++ b/libgo/go/runtime/lfstack_test.go
@@ -71,6 +71,8 @@ func TestLFStack(t *testing.T) {
}
}
+var stress []*MyNode
+
func TestLFStackStress(t *testing.T) {
const K = 100
P := 4 * GOMAXPROCS(-1)
@@ -80,14 +82,15 @@ func TestLFStackStress(t *testing.T) {
}
// Create 2 stacks.
stacks := [2]*uint64{new(uint64), new(uint64)}
- // Need to keep additional referenfces to nodes, the stack is not all that type-safe.
- var nodes []*MyNode
+ // Need to keep additional references to nodes,
+ // the lock-free stack is not type-safe.
+ stress = nil
// Push K elements randomly onto the stacks.
sum := 0
for i := 0; i < K; i++ {
sum += i
node := &MyNode{data: i}
- nodes = append(nodes, node)
+ stress = append(stress, node)
LFStackPush(stacks[i%2], fromMyNode(node))
}
c := make(chan bool, P)
@@ -127,4 +130,7 @@ func TestLFStackStress(t *testing.T) {
if sum2 != sum {
t.Fatalf("Wrong sum %d/%d", sum2, sum)
}
+
+ // Let nodes be collected now.
+ stress = nil
}
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index 3d18e3b..fe5d3ad 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -422,8 +422,7 @@ func TestMapIterOrder(t *testing.T) {
t.Skip("skipping for gccgo")
}
- // TODO: For issue 6719, add 3 and 7 to this list.
- for _, n := range [...]int{9, 15} {
+ for _, n := range [...]int{3, 7, 9, 15} {
// Make m be {0: true, 1: true, ..., n-1: true}.
m := make(map[int]bool)
for i := 0; i < n; i++ {
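
Note: dropping the TODO lets TestMapIterOrder cover 3- and 7-element maps now that iteration order is randomized for small maps too (issue 6719). Iteration order over any Go map is unspecified and can differ between ranges; a quick demonstration (output varies from run to run):

package main

import "fmt"

func main() {
	m := map[int]bool{0: true, 1: true, 2: true}
	// Each range over the same map may visit keys in a different
	// order; no ordering is ever guaranteed.
	for i := 0; i < 3; i++ {
		for k := range m {
			fmt.Print(k, " ")
		}
		fmt.Println()
	}
}
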
diff --git a/libgo/go/runtime/mem.go b/libgo/go/runtime/mem.go
index ba6d1cf..fb35535 100644
--- a/libgo/go/runtime/mem.go
+++ b/libgo/go/runtime/mem.go
@@ -60,11 +60,10 @@ type MemStats struct {
var Sizeof_C_MStats uintptr // filled in by malloc.goc
-var VmemStats MemStats
-
func init() {
- if Sizeof_C_MStats != unsafe.Sizeof(VmemStats) {
- println(Sizeof_C_MStats, unsafe.Sizeof(VmemStats))
+ var memStats MemStats
+ if Sizeof_C_MStats != unsafe.Sizeof(memStats) {
+ println(Sizeof_C_MStats, unsafe.Sizeof(memStats))
panic("MStats vs MemStatsType size mismatch")
}
}
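
Note: the mem.go change drops the exported VmemStats global; a MemStats value local to init is enough for the one-time size comparison against the C side of the build. The shape of that startup check, sketched with stand-in types (sizeFromC models Sizeof_C_MStats, which the real runtime fills in from C):

package main

import (
	"fmt"
	"unsafe"
)

// memStatsLike stands in for runtime.MemStats.
type memStatsLike struct {
	Alloc, TotalAlloc, Sys uint64
}

// sizeFromC models the size reported by the C half of the runtime.
var sizeFromC = unsafe.Sizeof(memStatsLike{})

func init() {
	// As in the patch, the measured value is a local: nothing needs
	// to outlive this one-time startup check.
	var m memStatsLike
	if sizeFromC != unsafe.Sizeof(m) {
		panic("MStats vs MemStats size mismatch")
	}
}

func main() { fmt.Println("layouts agree") }
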
diff --git a/libgo/go/runtime/memmove_test.go b/libgo/go/runtime/memmove_test.go
index 9525f06..5c01aac 100644
--- a/libgo/go/runtime/memmove_test.go
+++ b/libgo/go/runtime/memmove_test.go
@@ -5,6 +5,7 @@
package runtime_test
import (
+ . "runtime"
"testing"
)
@@ -80,7 +81,7 @@ func TestMemmoveAlias(t *testing.T) {
}
}
-func bmMemmove(n int, b *testing.B) {
+func bmMemmove(b *testing.B, n int) {
x := make([]byte, n)
y := make([]byte, n)
b.SetBytes(int64(n))
@@ -89,28 +90,74 @@ func bmMemmove(n int, b *testing.B) {
}
}
-func BenchmarkMemmove0(b *testing.B) { bmMemmove(0, b) }
-func BenchmarkMemmove1(b *testing.B) { bmMemmove(1, b) }
-func BenchmarkMemmove2(b *testing.B) { bmMemmove(2, b) }
-func BenchmarkMemmove3(b *testing.B) { bmMemmove(3, b) }
-func BenchmarkMemmove4(b *testing.B) { bmMemmove(4, b) }
-func BenchmarkMemmove5(b *testing.B) { bmMemmove(5, b) }
-func BenchmarkMemmove6(b *testing.B) { bmMemmove(6, b) }
-func BenchmarkMemmove7(b *testing.B) { bmMemmove(7, b) }
-func BenchmarkMemmove8(b *testing.B) { bmMemmove(8, b) }
-func BenchmarkMemmove9(b *testing.B) { bmMemmove(9, b) }
-func BenchmarkMemmove10(b *testing.B) { bmMemmove(10, b) }
-func BenchmarkMemmove11(b *testing.B) { bmMemmove(11, b) }
-func BenchmarkMemmove12(b *testing.B) { bmMemmove(12, b) }
-func BenchmarkMemmove13(b *testing.B) { bmMemmove(13, b) }
-func BenchmarkMemmove14(b *testing.B) { bmMemmove(14, b) }
-func BenchmarkMemmove15(b *testing.B) { bmMemmove(15, b) }
-func BenchmarkMemmove16(b *testing.B) { bmMemmove(16, b) }
-func BenchmarkMemmove32(b *testing.B) { bmMemmove(32, b) }
-func BenchmarkMemmove64(b *testing.B) { bmMemmove(64, b) }
-func BenchmarkMemmove128(b *testing.B) { bmMemmove(128, b) }
-func BenchmarkMemmove256(b *testing.B) { bmMemmove(256, b) }
-func BenchmarkMemmove512(b *testing.B) { bmMemmove(512, b) }
-func BenchmarkMemmove1024(b *testing.B) { bmMemmove(1024, b) }
-func BenchmarkMemmove2048(b *testing.B) { bmMemmove(2048, b) }
-func BenchmarkMemmove4096(b *testing.B) { bmMemmove(4096, b) }
+func BenchmarkMemmove0(b *testing.B) { bmMemmove(b, 0) }
+func BenchmarkMemmove1(b *testing.B) { bmMemmove(b, 1) }
+func BenchmarkMemmove2(b *testing.B) { bmMemmove(b, 2) }
+func BenchmarkMemmove3(b *testing.B) { bmMemmove(b, 3) }
+func BenchmarkMemmove4(b *testing.B) { bmMemmove(b, 4) }
+func BenchmarkMemmove5(b *testing.B) { bmMemmove(b, 5) }
+func BenchmarkMemmove6(b *testing.B) { bmMemmove(b, 6) }
+func BenchmarkMemmove7(b *testing.B) { bmMemmove(b, 7) }
+func BenchmarkMemmove8(b *testing.B) { bmMemmove(b, 8) }
+func BenchmarkMemmove9(b *testing.B) { bmMemmove(b, 9) }
+func BenchmarkMemmove10(b *testing.B) { bmMemmove(b, 10) }
+func BenchmarkMemmove11(b *testing.B) { bmMemmove(b, 11) }
+func BenchmarkMemmove12(b *testing.B) { bmMemmove(b, 12) }
+func BenchmarkMemmove13(b *testing.B) { bmMemmove(b, 13) }
+func BenchmarkMemmove14(b *testing.B) { bmMemmove(b, 14) }
+func BenchmarkMemmove15(b *testing.B) { bmMemmove(b, 15) }
+func BenchmarkMemmove16(b *testing.B) { bmMemmove(b, 16) }
+func BenchmarkMemmove32(b *testing.B) { bmMemmove(b, 32) }
+func BenchmarkMemmove64(b *testing.B) { bmMemmove(b, 64) }
+func BenchmarkMemmove128(b *testing.B) { bmMemmove(b, 128) }
+func BenchmarkMemmove256(b *testing.B) { bmMemmove(b, 256) }
+func BenchmarkMemmove512(b *testing.B) { bmMemmove(b, 512) }
+func BenchmarkMemmove1024(b *testing.B) { bmMemmove(b, 1024) }
+func BenchmarkMemmove2048(b *testing.B) { bmMemmove(b, 2048) }
+func BenchmarkMemmove4096(b *testing.B) { bmMemmove(b, 4096) }
+
+func TestMemclr(t *testing.T) {
+ size := 512
+ if testing.Short() {
+ size = 128 + 16
+ }
+ mem := make([]byte, size)
+ for i := 0; i < size; i++ {
+ mem[i] = 0xee
+ }
+ for n := 0; n < size; n++ {
+ for x := 0; x <= size-n; x++ { // offset in mem
+ MemclrBytes(mem[x : x+n])
+ for i := 0; i < x; i++ {
+ if mem[i] != 0xee {
+ t.Fatalf("overwrite prefix mem[%d] = %d", i, mem[i])
+ }
+ }
+ for i := x; i < x+n; i++ {
+ if mem[i] != 0 {
+ t.Fatalf("failed clear mem[%d] = %d", i, mem[i])
+ }
+ mem[i] = 0xee
+ }
+ for i := x + n; i < size; i++ {
+ if mem[i] != 0xee {
+ t.Fatalf("overwrite suffix mem[%d] = %d", i, mem[i])
+ }
+ }
+ }
+ }
+}
+
+func bmMemclr(b *testing.B, n int) {
+ x := make([]byte, n)
+ b.SetBytes(int64(n))
+ for i := 0; i < b.N; i++ {
+ MemclrBytes(x)
+ }
+}
+func BenchmarkMemclr5(b *testing.B) { bmMemclr(b, 5) }
+func BenchmarkMemclr16(b *testing.B) { bmMemclr(b, 16) }
+func BenchmarkMemclr64(b *testing.B) { bmMemclr(b, 64) }
+func BenchmarkMemclr256(b *testing.B) { bmMemclr(b, 256) }
+func BenchmarkMemclr4096(b *testing.B) { bmMemclr(b, 4096) }
+func BenchmarkMemclr65536(b *testing.B) { bmMemclr(b, 65536) }
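
Note: TestMemclr above checks that MemclrBytes zeroes exactly the requested window and leaves the bytes on either side untouched. In ordinary Go the equivalent is a plain loop; recent versions of the gc compiler recognize this form and lower it to an optimized memory clear:

package main

import "fmt"

// zero clears b in place. The canonical "range and assign zero" loop
// is the idiomatic spelling of memclr in user code.
func zero(b []byte) {
	for i := range b {
		b[i] = 0
	}
}

func main() {
	mem := []byte{0xee, 0xee, 0xee, 0xee}
	zero(mem[1:3])   // clear only the middle window
	fmt.Println(mem) // [238 0 0 238]
}
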
diff --git a/libgo/go/runtime/pprof/pprof.go b/libgo/go/runtime/pprof/pprof.go
index 3b84285..9808045 100644
--- a/libgo/go/runtime/pprof/pprof.go
+++ b/libgo/go/runtime/pprof/pprof.go
@@ -20,7 +20,7 @@ import (
"text/tabwriter"
)
-// BUG(rsc): Profiles are incomplete and inaccuate on NetBSD, OpenBSD, and OS X.
+// BUG(rsc): Profiles are incomplete and inaccuate on NetBSD and OS X.
// See http://golang.org/issue/6047 for details.
// A Profile is a collection of stack traces showing the call sequences
diff --git a/libgo/go/runtime/pprof/pprof_test.go b/libgo/go/runtime/pprof/pprof_test.go
index e556ca1..923c5b3 100644
--- a/libgo/go/runtime/pprof/pprof_test.go
+++ b/libgo/go/runtime/pprof/pprof_test.go
@@ -33,10 +33,6 @@ func TestCPUProfile(t *testing.T) {
}
func TestCPUProfileMultithreaded(t *testing.T) {
- // TODO(brainman): delete when issue 6986 is fixed.
- if runtime.GOOS == "windows" && runtime.GOARCH == "amd64" {
- t.Skip("skipping broken test on windows-amd64-race")
- }
buf := make([]byte, 100000)
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
testCPUProfile(t, []string{"crc32.update"}, func() {
@@ -142,7 +138,11 @@ func testCPUProfile(t *testing.T, need []string, f func()) {
t.Logf("no CPU profile samples collected")
ok = false
}
- min := total / uintptr(len(have)) / 3
+ // We'd like to check a reasonable minimum, like
+ // total / len(have) / smallconstant, but this test is
+ // pretty flaky (see bug 7095). So we'll just test to
+ // make sure we got at least one sample.
+ min := uintptr(1)
for i, name := range need {
if have[i] < min {
t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
@@ -193,9 +193,6 @@ func TestCPUProfileWithFork(t *testing.T) {
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
- if runtime.GOOS == "windows" {
- t.Skip("flaky test; see http://golang.org/issue/6417")
- }
// How much to try. These defaults take about 1 seconds
// on a 2012 MacBook Pro. The ones in short mode take
// about 0.1 seconds.
@@ -221,7 +218,7 @@ func TestGoroutineSwitch(t *testing.T) {
// exists to record a PC without a traceback. Those are okay.
if len(stk) == 2 {
f := runtime.FuncForPC(stk[1])
- if f != nil && f.Name() == "System" {
+ if f != nil && (f.Name() == "System" || f.Name() == "ExternalCode") {
return
}
}
@@ -248,10 +245,6 @@ func TestGoroutineSwitch(t *testing.T) {
// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
- // TODO(brainman): delete when issue 6986 is fixed.
- if runtime.GOOS == "windows" && runtime.GOARCH == "amd64" {
- t.Skip("skipping broken test on windows-amd64-race")
- }
testCPUProfile(t, nil, func() {
t := time.After(5 * time.Second)
pi := new(big.Int)
@@ -272,9 +265,9 @@ func TestMathBigDivide(t *testing.T) {
// Operating systems that are expected to fail the tests. See issue 6047.
var badOS = map[string]bool{
- "darwin": true,
- "netbsd": true,
- "openbsd": true,
+ "darwin": true,
+ "netbsd": true,
+ "plan9": true,
}
func TestBlockProfile(t *testing.T) {
diff --git a/libgo/go/runtime/proc_test.go b/libgo/go/runtime/proc_test.go
index 29f71e7..bdcb199 100644
--- a/libgo/go/runtime/proc_test.go
+++ b/libgo/go/runtime/proc_test.go
@@ -246,6 +246,49 @@ func TestPreemptionGC(t *testing.T) {
atomic.StoreUint32(&stop, 1)
}
+func TestGCFairness(t *testing.T) {
+ output := executeTest(t, testGCFairnessSource, nil)
+ want := "OK\n"
+ if output != want {
+ t.Fatalf("want %s, got %s\n", want, output)
+ }
+}
+
+const testGCFairnessSource = `
+package main
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "time"
+)
+
+func main() {
+ runtime.GOMAXPROCS(1)
+ f, err := os.Open("/dev/null")
+ if os.IsNotExist(err) {
+ // This test tests what it is intended to test only if writes are fast.
+ // If there is no /dev/null, we just don't execute the test.
+ fmt.Println("OK")
+ return
+ }
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ for i := 0; i < 2; i++ {
+ go func() {
+ for {
+ f.Write([]byte("."))
+ }
+ }()
+ }
+ time.Sleep(10 * time.Millisecond)
+ fmt.Println("OK")
+}
+`
+
func stackGrowthRecursive(i int) {
var pad [128]uint64
if i != 0 && pad[0] == 0 {