author     Ian Lance Taylor <iant@google.com>   2015-01-15 00:27:56 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2015-01-15 00:27:56 +0000
commit     f8d9fa9e80b57f89e7877ce6cad8a3464879009b
tree       58a1724fee16d2b03c65678c4dd9b50bb97137a9 /libgo/go/sync
parent     6bd3f109d8d8fa58eeccd6b3504721b4f20c00c2
libgo, compiler: Upgrade libgo to Go 1.4, except for runtime.
This upgrades all of libgo other than the runtime package to
the Go 1.4 release. In Go 1.4 much of the runtime was
rewritten into Go. Merging that code will take more time and
will not change the API, so I'm putting it off for now.
There are a few runtime changes anyhow, to accommodate other
packages that rely on minor modifications to the runtime
support.
The compiler changes slightly to add a one-bit flag to each
type descriptor kind, marking types whose values are stored
directly in an interface; for gccgo that is currently only
pointer types. Another one-bit flag (gcprog) is reserved
because it is used by the gc compiler, but gccgo does not
currently use it.
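As a rough sketch of how such a flag can be laid out, the gc runtime packs it
into the kind byte of a type descriptor; the constant names below follow that
convention and are illustrative only, not gccgo's actual source:

package main

import (
	"fmt"
	"reflect"
)

// Illustrative layout of the kind byte in a type descriptor.
// The low bits hold the kind itself; the high bits are flags.
const (
	kindDirectIface = 1 << 5       // value is stored directly in an interface
	kindGCProg      = 1 << 6       // reserved; used by the gc compiler, not gccgo
	kindMask        = (1 << 5) - 1 // mask that recovers the plain kind
)

func main() {
	// For gccgo only pointer types would set the direct-interface bit.
	kind := uint8(reflect.Ptr) | kindDirectIface
	fmt.Println(reflect.Kind(kind&kindMask), kind&kindDirectIface != 0)
}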
There is also another error check in the compiler, added
because I ran across the case during testing.
gotools/:
* Makefile.am (go_cmd_go_files): Sort entries. Add generate.go.
* Makefile.in: Rebuild.
From-SVN: r219627
Diffstat (limited to 'libgo/go/sync')
 libgo/go/sync/atomic/64bit_arm.go  |  12
 libgo/go/sync/atomic/doc.go        |   2
 libgo/go/sync/atomic/race.go       | 276
 libgo/go/sync/atomic/value.go      |  85
 libgo/go/sync/atomic/value_test.go | 195
 libgo/go/sync/once.go              |   7
 libgo/go/sync/once_test.go         |  26
 libgo/go/sync/pool.go              |   2
 libgo/go/sync/pool_test.go         |  60
 libgo/go/sync/runtime.go           |   8
 libgo/go/sync/rwmutex.go           |  10
 libgo/go/sync/rwmutex_test.go      |  42
 libgo/go/sync/waitgroup.go         |  11
13 files changed, 409 insertions(+), 327 deletions(-)
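The largest additions in this diffstat are value.go and value_test.go, which
bring in Go 1.4's new atomic.Value type (its full source appears in the diff
below). A minimal usage sketch, separate from the patch itself:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v atomic.Value
	// Every Store for a given Value must use the same concrete type;
	// storing nil or a differently typed value panics.
	v.Store("initial")
	v.Store("updated")
	// Load returns interface{}; assert back to the stored type.
	s := v.Load().(string)
	fmt.Println(s) // updated
}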
diff --git a/libgo/go/sync/atomic/64bit_arm.go b/libgo/go/sync/atomic/64bit_arm.go index c08f214..b98e608 100644 --- a/libgo/go/sync/atomic/64bit_arm.go +++ b/libgo/go/sync/atomic/64bit_arm.go @@ -44,3 +44,15 @@ func swapUint64(addr *uint64, new uint64) (old uint64) { } return } + +// Additional ARM-specific assembly routines. +// Declaration here to give assembly routines correct stack maps for arguments. +func armCompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool) +func armCompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool) +func generalCAS64(addr *uint64, old, new uint64) (swapped bool) +func armAddUint32(addr *uint32, delta uint32) (new uint32) +func armAddUint64(addr *uint64, delta uint64) (new uint64) +func armSwapUint32(addr *uint32, new uint32) (old uint32) +func armSwapUint64(addr *uint64, new uint64) (old uint64) +func armLoadUint64(addr *uint64) (val uint64) +func armStoreUint64(addr *uint64, val uint64) diff --git a/libgo/go/sync/atomic/doc.go b/libgo/go/sync/atomic/doc.go index 17ba72f..10fb8c9 100644 --- a/libgo/go/sync/atomic/doc.go +++ b/libgo/go/sync/atomic/doc.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !race - // Package atomic provides low-level atomic memory primitives // useful for implementing synchronization algorithms. // diff --git a/libgo/go/sync/atomic/race.go b/libgo/go/sync/atomic/race.go deleted file mode 100644 index 6cbbf12..0000000 --- a/libgo/go/sync/atomic/race.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build race - -package atomic - -import ( - "runtime" - "unsafe" -) - -// We use runtime.RaceRead() inside of atomic operations to catch races -// between atomic and non-atomic operations. It will also catch races -// between Mutex.Lock() and mutex overwrite (mu = Mutex{}). Since we use -// only RaceRead() we won't catch races with non-atomic loads. -// Otherwise (if we use RaceWrite()) we will report races -// between atomic operations (false positives). 
- -var mtx uint32 = 1 // same for all - -func SwapInt32(addr *int32, new int32) (old int32) { - return int32(SwapUint32((*uint32)(unsafe.Pointer(addr)), uint32(new))) -} - -func SwapUint32(addr *uint32, new uint32) (old uint32) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - old = *addr - *addr = new - runtime.RaceReleaseMerge(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) - return -} - -func SwapInt64(addr *int64, new int64) (old int64) { - return int64(SwapUint64((*uint64)(unsafe.Pointer(addr)), uint64(new))) -} - -func SwapUint64(addr *uint64, new uint64) (old uint64) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - old = *addr - *addr = new - runtime.RaceReleaseMerge(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) - return -} - -func SwapUintptr(addr *uintptr, new uintptr) (old uintptr) { - return uintptr(SwapPointer((*unsafe.Pointer)(unsafe.Pointer(addr)), unsafe.Pointer(new))) -} - -func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - old = *addr - *addr = new - runtime.RaceReleaseMerge(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) - return -} - -func CompareAndSwapInt32(val *int32, old, new int32) bool { - return CompareAndSwapUint32((*uint32)(unsafe.Pointer(val)), uint32(old), uint32(new)) -} - -func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool) { - _ = *val - swapped = false - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - if *val == old { - *val = new - swapped = true - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - } - runtime.RaceSemrelease(&mtx) - return -} - -func CompareAndSwapInt64(val *int64, old, new int64) bool { - return CompareAndSwapUint64((*uint64)(unsafe.Pointer(val)), uint64(old), uint64(new)) -} - -func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool) { - _ = *val - swapped = false - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - if *val == old { - *val = new - swapped = true - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - } - runtime.RaceSemrelease(&mtx) - return -} - -func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool) { - _ = *val - swapped = false - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - if *val == old { - *val = new - swapped = true - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - } - runtime.RaceSemrelease(&mtx) - return -} - -func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool) { - _ = *val - swapped = false - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - if *val == old { - *val = new - swapped = true - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - } - runtime.RaceSemrelease(&mtx) - return -} - -func AddInt32(val *int32, delta int32) int32 { - return int32(AddUint32((*uint32)(unsafe.Pointer(val)), uint32(delta))) -} - -func AddUint32(val *uint32, delta uint32) (new uint32) { - _ = *val - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - *val = *val + delta - new = *val - 
runtime.RaceReleaseMerge(unsafe.Pointer(val)) - runtime.RaceSemrelease(&mtx) - - return -} - -func AddInt64(val *int64, delta int64) int64 { - return int64(AddUint64((*uint64)(unsafe.Pointer(val)), uint64(delta))) -} - -func AddUint64(val *uint64, delta uint64) (new uint64) { - _ = *val - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - *val = *val + delta - new = *val - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - runtime.RaceSemrelease(&mtx) - - return -} - -func AddUintptr(val *uintptr, delta uintptr) (new uintptr) { - _ = *val - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(val)) - runtime.RaceAcquire(unsafe.Pointer(val)) - *val = *val + delta - new = *val - runtime.RaceReleaseMerge(unsafe.Pointer(val)) - runtime.RaceSemrelease(&mtx) - - return -} - -func LoadInt32(addr *int32) int32 { - return int32(LoadUint32((*uint32)(unsafe.Pointer(addr)))) -} - -func LoadUint32(addr *uint32) (val uint32) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - val = *addr - runtime.RaceSemrelease(&mtx) - return -} - -func LoadInt64(addr *int64) int64 { - return int64(LoadUint64((*uint64)(unsafe.Pointer(addr)))) -} - -func LoadUint64(addr *uint64) (val uint64) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - val = *addr - runtime.RaceSemrelease(&mtx) - return -} - -func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - val = *addr - runtime.RaceSemrelease(&mtx) - return -} - -func LoadUintptr(addr *uintptr) (val uintptr) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - runtime.RaceAcquire(unsafe.Pointer(addr)) - val = *addr - runtime.RaceSemrelease(&mtx) - return -} - -func StoreInt32(addr *int32, val int32) { - StoreUint32((*uint32)(unsafe.Pointer(addr)), uint32(val)) -} - -func StoreUint32(addr *uint32, val uint32) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - *addr = val - runtime.RaceRelease(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) -} - -func StoreInt64(addr *int64, val int64) { - StoreUint64((*uint64)(unsafe.Pointer(addr)), uint64(val)) -} - -func StoreUint64(addr *uint64, val uint64) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - *addr = val - runtime.RaceRelease(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) -} - -func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - *addr = val - runtime.RaceRelease(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) -} - -func StoreUintptr(addr *uintptr, val uintptr) { - _ = *addr - runtime.RaceSemacquire(&mtx) - runtime.RaceRead(unsafe.Pointer(addr)) - *addr = val - runtime.RaceRelease(unsafe.Pointer(addr)) - runtime.RaceSemrelease(&mtx) -} diff --git a/libgo/go/sync/atomic/value.go b/libgo/go/sync/atomic/value.go new file mode 100644 index 0000000..ab3aa112 --- /dev/null +++ b/libgo/go/sync/atomic/value.go @@ -0,0 +1,85 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package atomic + +import ( + "unsafe" +) + +// A Value provides an atomic load and store of a consistently typed value. +// Values can be created as part of other data structures. +// The zero value for a Value returns nil from Load. +// Once Store has been called, a Value must not be copied. +type Value struct { + v interface{} +} + +// ifaceWords is interface{} internal representation. +type ifaceWords struct { + typ unsafe.Pointer + data unsafe.Pointer +} + +// Load returns the value set by the most recent Store. +// It returns nil if there has been no call to Store for this Value. +func (v *Value) Load() (x interface{}) { + vp := (*ifaceWords)(unsafe.Pointer(v)) + typ := LoadPointer(&vp.typ) + if typ == nil || uintptr(typ) == ^uintptr(0) { + // First store not yet completed. + return nil + } + data := LoadPointer(&vp.data) + xp := (*ifaceWords)(unsafe.Pointer(&x)) + xp.typ = typ + xp.data = data + return +} + +// Store sets the value of the Value to x. +// All calls to Store for a given Value must use values of the same concrete type. +// Store of an inconsistent type panics, as does Store(nil). +func (v *Value) Store(x interface{}) { + if x == nil { + panic("sync/atomic: store of nil value into Value") + } + vp := (*ifaceWords)(unsafe.Pointer(v)) + xp := (*ifaceWords)(unsafe.Pointer(&x)) + for { + typ := LoadPointer(&vp.typ) + if typ == nil { + // Attempt to start first store. + // Disable preemption so that other goroutines can use + // active spin wait to wait for completion; and so that + // GC does not see the fake type accidentally. + runtime_procPin() + if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(^uintptr(0))) { + runtime_procUnpin() + continue + } + // Complete first store. + StorePointer(&vp.data, xp.data) + StorePointer(&vp.typ, xp.typ) + runtime_procUnpin() + return + } + if uintptr(typ) == ^uintptr(0) { + // First store in progress. Wait. + // Since we disable preemption around the first store, + // we can wait with active spinning. + continue + } + // First store completed. Check type and overwrite data. + if typ != xp.typ { + panic("sync/atomic: store of inconsistently typed value into Value") + } + StorePointer(&vp.data, xp.data) + return + } +} + +// Disable/enable preemption, implemented in runtime. +func runtime_procPin() +func runtime_procUnpin() diff --git a/libgo/go/sync/atomic/value_test.go b/libgo/go/sync/atomic/value_test.go new file mode 100644 index 0000000..382dc68 --- /dev/null +++ b/libgo/go/sync/atomic/value_test.go @@ -0,0 +1,195 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package atomic_test + +import ( + "math/rand" + "runtime" + "sync" + . 
"sync/atomic" + "testing" + "time" +) + +func TestValue(t *testing.T) { + var v Value + if v.Load() != nil { + t.Fatal("initial Value is not nil") + } + v.Store(42) + x := v.Load() + if xx, ok := x.(int); !ok || xx != 42 { + t.Fatalf("wrong value: got %+v, want 42", x) + } + v.Store(84) + x = v.Load() + if xx, ok := x.(int); !ok || xx != 84 { + t.Fatalf("wrong value: got %+v, want 84", x) + } +} + +func TestValueLarge(t *testing.T) { + var v Value + v.Store("foo") + x := v.Load() + if xx, ok := x.(string); !ok || xx != "foo" { + t.Fatalf("wrong value: got %+v, want foo", x) + } + v.Store("barbaz") + x = v.Load() + if xx, ok := x.(string); !ok || xx != "barbaz" { + t.Fatalf("wrong value: got %+v, want barbaz", x) + } +} + +func TestValuePanic(t *testing.T) { + const nilErr = "sync/atomic: store of nil value into Value" + const badErr = "sync/atomic: store of inconsistently typed value into Value" + var v Value + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() + v.Store(42) + func() { + defer func() { + err := recover() + if err != badErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, badErr) + } + }() + v.Store("foo") + }() + func() { + defer func() { + err := recover() + if err != nilErr { + t.Fatalf("inconsistent store panic: got '%v', want '%v'", err, nilErr) + } + }() + v.Store(nil) + }() +} + +func TestValueConcurrent(t *testing.T) { + tests := [][]interface{}{ + {uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)}, + {uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)}, + {uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)}, + {complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)}, + } + p := 4 * runtime.GOMAXPROCS(0) + for _, test := range tests { + var v Value + done := make(chan bool) + for i := 0; i < p; i++ { + go func() { + r := rand.New(rand.NewSource(rand.Int63())) + loop: + for j := 0; j < 1e5; j++ { + x := test[r.Intn(len(test))] + v.Store(x) + x = v.Load() + for _, x1 := range test { + if x == x1 { + continue loop + } + } + t.Logf("loaded unexpected value %+v, want %+v", x, test) + done <- false + } + done <- true + }() + } + for i := 0; i < p; i++ { + if !<-done { + t.FailNow() + } + } + } +} + +func BenchmarkValueRead(b *testing.B) { + var v Value + v.Store(new(int)) + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + x := v.Load().(*int) + if *x != 0 { + b.Fatalf("wrong value: got %v, want 0", *x) + } + } + }) +} + +// The following example shows how to use Value for periodic program config updates +// and propagation of the changes to worker goroutines. +func ExampleValue_config() { + var config Value // holds current server configuration + // Create initial config value and store into config. + config.Store(loadConfig()) + go func() { + // Reload config every 10 seconds + // and update config value with the new version. + for { + time.Sleep(10 * time.Second) + config.Store(loadConfig()) + } + }() + // Create worker goroutines that handle incoming requests + // using the latest config value. + for i := 0; i < 10; i++ { + go func() { + for r := range requests() { + c := config.Load() + // Handle request r using config c. 
+ _, _ = r, c + } + }() + } +} + +func loadConfig() map[string]string { + return make(map[string]string) +} + +func requests() chan int { + return make(chan int) +} + +// The following example shows how to maintain a scalable frequently read, +// but infrequently updated data structure using copy-on-write idiom. +func ExampleValue_readMostly() { + type Map map[string]string + var m Value + m.Store(make(Map)) + var mu sync.Mutex // used only by writers + // read function can be used to read the data without further synchronization + read := func(key string) (val string) { + m1 := m.Load().(Map) + return m1[key] + } + // insert function can be used to update the data without further synchronization + insert := func(key, val string) { + mu.Lock() // synchronize with other potential writers + defer mu.Unlock() + m1 := m.Load().(Map) // load current value of the data structure + m2 := make(Map) // create a new value + for k, v := range m1 { + m2[k] = v // copy all data from the current object to the new one + } + m2[key] = val // do the update that we need + m.Store(m2) // atomically replace the current object with the new one + // At this point all new readers start working with the new version. + // The old version will be garbage collected once the existing readers + // (if any) are done with it. + } + _, _ = read, insert +} diff --git a/libgo/go/sync/once.go b/libgo/go/sync/once.go index 161ae3b..10b42fd 100644 --- a/libgo/go/sync/once.go +++ b/libgo/go/sync/once.go @@ -15,7 +15,7 @@ type Once struct { } // Do calls the function f if and only if Do is being called for the -// first time for this instance of Once. In other words, given +// first time for this instance of Once. In other words, given // var once Once // if once.Do(f) is called multiple times, only the first call will invoke f, // even if f has a different value in each invocation. A new instance of @@ -29,6 +29,9 @@ type Once struct { // Because no call to Do returns until the one call to f returns, if f causes // Do to be called, it will deadlock. // +// If f panics, Do considers it to have returned; future calls of Do return +// without calling f. 
+// func (o *Once) Do(f func()) { if atomic.LoadUint32(&o.done) == 1 { return @@ -37,7 +40,7 @@ func (o *Once) Do(f func()) { o.m.Lock() defer o.m.Unlock() if o.done == 0 { + defer atomic.StoreUint32(&o.done, 1) f() - atomic.StoreUint32(&o.done, 1) } } diff --git a/libgo/go/sync/once_test.go b/libgo/go/sync/once_test.go index 8afda82f..1eec8d1 100644 --- a/libgo/go/sync/once_test.go +++ b/libgo/go/sync/once_test.go @@ -40,22 +40,20 @@ func TestOnce(t *testing.T) { } func TestOncePanic(t *testing.T) { - once := new(Once) - for i := 0; i < 2; i++ { - func() { - defer func() { - if recover() == nil { - t.Fatalf("Once.Do() has not panic'ed") - } - }() - once.Do(func() { - panic("failed") - }) + var once Once + func() { + defer func() { + if r := recover(); r == nil { + t.Fatalf("Once.Do did not panic") + } }() - } - once.Do(func() {}) + once.Do(func() { + panic("failed") + }) + }() + once.Do(func() { - t.Fatalf("Once called twice") + t.Fatalf("Once.Do called twice") }) } diff --git a/libgo/go/sync/pool.go b/libgo/go/sync/pool.go index 1f08707..0cf06370 100644 --- a/libgo/go/sync/pool.go +++ b/libgo/go/sync/pool.go @@ -200,6 +200,8 @@ func poolCleanup() { } l.shared = nil } + p.local = nil + p.localSize = 0 } allPools = []*Pool{} } diff --git a/libgo/go/sync/pool_test.go b/libgo/go/sync/pool_test.go index c13477d..051bb17 100644 --- a/libgo/go/sync/pool_test.go +++ b/libgo/go/sync/pool_test.go @@ -69,37 +69,45 @@ func TestPoolNew(t *testing.T) { } } -// Test that Pool does not hold pointers to previously cached -// resources +// Test that Pool does not hold pointers to previously cached resources. func TestPoolGC(t *testing.T) { + testPool(t, true) +} + +// Test that Pool releases resources on GC. +func TestPoolRelease(t *testing.T) { + testPool(t, false) +} + +func testPool(t *testing.T, drain bool) { + t.Skip("gccgo imprecise GC breaks this test") var p Pool - var fin uint32 const N = 100 - for i := 0; i < N; i++ { - v := new(string) - runtime.SetFinalizer(v, func(vv *string) { - atomic.AddUint32(&fin, 1) - }) - p.Put(v) - } - for i := 0; i < N; i++ { - p.Get() - } - for i := 0; i < 5; i++ { - runtime.GC() - time.Sleep(time.Duration(i*100+10) * time.Millisecond) - // 1 pointer can remain on stack or elsewhere - if atomic.LoadUint32(&fin) >= N-1 { - return +loop: + for try := 0; try < 3; try++ { + var fin, fin1 uint32 + for i := 0; i < N; i++ { + v := new(string) + runtime.SetFinalizer(v, func(vv *string) { + atomic.AddUint32(&fin, 1) + }) + p.Put(v) } - - // gccgo has a less precise heap. 
- if runtime.Compiler == "gccgo" && atomic.LoadUint32(&fin) >= N-5 { - return + if drain { + for i := 0; i < N; i++ { + p.Get() + } + } + for i := 0; i < 5; i++ { + runtime.GC() + time.Sleep(time.Duration(i*100+10) * time.Millisecond) + // 1 pointer can remain on stack or elsewhere + if fin1 = atomic.LoadUint32(&fin); fin1 >= N-1 { + continue loop + } } + t.Fatalf("only %v out of %v resources are finalized on try %v", fin1, N, try) } - t.Fatalf("only %v out of %v resources are finalized", - atomic.LoadUint32(&fin), N) } func TestPoolStress(t *testing.T) { @@ -141,7 +149,7 @@ func BenchmarkPool(b *testing.B) { }) } -func BenchmarkPoolOverlflow(b *testing.B) { +func BenchmarkPoolOverflow(b *testing.B) { var p Pool b.RunParallel(func(pb *testing.PB) { for pb.Next() { diff --git a/libgo/go/sync/runtime.go b/libgo/go/sync/runtime.go index 3bf47ea..3b86630 100644 --- a/libgo/go/sync/runtime.go +++ b/libgo/go/sync/runtime.go @@ -19,8 +19,12 @@ func runtime_Semacquire(s *uint32) // library and should not be used directly. func runtime_Semrelease(s *uint32) -// Opaque representation of SyncSema in runtime/sema.goc. -type syncSema [3]uintptr +// Approximation of syncSema in runtime/sema.go. +type syncSema struct { + lock uintptr + head unsafe.Pointer + tail unsafe.Pointer +} // Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s. func runtime_Syncsemacquire(s *syncSema) diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go index 3db5419..0e8a58e 100644 --- a/libgo/go/sync/rwmutex.go +++ b/libgo/go/sync/rwmutex.go @@ -51,7 +51,11 @@ func (rw *RWMutex) RUnlock() { raceReleaseMerge(unsafe.Pointer(&rw.writerSem)) raceDisable() } - if atomic.AddInt32(&rw.readerCount, -1) < 0 { + if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 { + if r+1 == 0 || r+1 == -rwmutexMaxReaders { + raceEnable() + panic("sync: RUnlock of unlocked RWMutex") + } // A writer is pending. if atomic.AddInt32(&rw.readerWait, -1) == 0 { // The last reader unblocks the writer. @@ -105,6 +109,10 @@ func (rw *RWMutex) Unlock() { // Announce to readers there is no active writer. r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders) + if r >= rwmutexMaxReaders { + raceEnable() + panic("sync: Unlock of unlocked RWMutex") + } // Unblock blocked readers, if any. 
for i := 0; i < int(r); i++ { runtime_Semrelease(&rw.readerSem) diff --git a/libgo/go/sync/rwmutex_test.go b/libgo/go/sync/rwmutex_test.go index 0436f97..f625bc3 100644 --- a/libgo/go/sync/rwmutex_test.go +++ b/libgo/go/sync/rwmutex_test.go @@ -155,6 +155,48 @@ func TestRLocker(t *testing.T) { } } +func TestUnlockPanic(t *testing.T) { + defer func() { + if recover() == nil { + t.Fatalf("unlock of unlocked RWMutex did not panic") + } + }() + var mu RWMutex + mu.Unlock() +} + +func TestUnlockPanic2(t *testing.T) { + defer func() { + if recover() == nil { + t.Fatalf("unlock of unlocked RWMutex did not panic") + } + }() + var mu RWMutex + mu.RLock() + mu.Unlock() +} + +func TestRUnlockPanic(t *testing.T) { + defer func() { + if recover() == nil { + t.Fatalf("read unlock of unlocked RWMutex did not panic") + } + }() + var mu RWMutex + mu.RUnlock() +} + +func TestRUnlockPanic2(t *testing.T) { + defer func() { + if recover() == nil { + t.Fatalf("read unlock of unlocked RWMutex did not panic") + } + }() + var mu RWMutex + mu.Lock() + mu.RUnlock() +} + func BenchmarkRWMutexUncontended(b *testing.B) { type PaddedRWMutex struct { RWMutex diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go index 4c64dca..92cc57d 100644 --- a/libgo/go/sync/waitgroup.go +++ b/libgo/go/sync/waitgroup.go @@ -37,10 +37,13 @@ type WaitGroup struct { // If the counter becomes zero, all goroutines blocked on Wait are released. // If the counter goes negative, Add panics. // -// Note that calls with positive delta must happen before the call to Wait, -// or else Wait may wait for too small a group. Typically this means the calls -// to Add should execute before the statement creating the goroutine or -// other event to be waited for. See the WaitGroup example. +// Note that calls with a positive delta that occur when the counter is zero +// must happen before a Wait. Calls with a negative delta, or calls with a +// positive delta that start when the counter is greater than zero, may happen +// at any time. +// Typically this means the calls to Add should execute before the statement +// creating the goroutine or other event to be waited for. +// See the WaitGroup example. func (wg *WaitGroup) Add(delta int) { if raceenabled { _ = wg.m.state // trigger nil deref early |
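A note on the once.go hunk above: the done flag is now set with a deferred
store, so a panicking f still marks the Once as completed and later calls do
not re-run it. A small sketch of that behavior under the Go 1.4 semantics
shown in the diff:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	func() {
		defer func() { recover() }() // swallow the panic from the first call
		once.Do(func() { panic("boom") })
	}()
	// Do considers the panicking call to have returned,
	// so this second call does not invoke its function.
	once.Do(func() { fmt.Println("never printed") })
	fmt.Println("second Do returned without running f")
}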