Diffstat (limited to 'libgo/go/sync')
-rw-r--r-- | libgo/go/sync/atomic/64bit_arm.go    | 58
-rw-r--r-- | libgo/go/sync/atomic/example_test.go | 76
-rw-r--r-- | libgo/go/sync/atomic/value_test.go   | 67
-rw-r--r-- | libgo/go/sync/cond.go                |  3
-rw-r--r-- | libgo/go/sync/cond_test.go           |  7
-rw-r--r-- | libgo/go/sync/rwmutex.go             |  5
-rw-r--r-- | libgo/go/sync/waitgroup.go           | 25
-rw-r--r-- | libgo/go/sync/waitgroup_test.go      | 27
8 files changed, 115 insertions, 153 deletions
diff --git a/libgo/go/sync/atomic/64bit_arm.go b/libgo/go/sync/atomic/64bit_arm.go
deleted file mode 100644
index 4ef1174..0000000
--- a/libgo/go/sync/atomic/64bit_arm.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package atomic
-
-func loadUint64(addr *uint64) (val uint64) {
-	for {
-		val = *addr
-		if CompareAndSwapUint64(addr, val, val) {
-			break
-		}
-	}
-	return
-}
-
-func storeUint64(addr *uint64, val uint64) {
-	for {
-		old := *addr
-		if CompareAndSwapUint64(addr, old, val) {
-			break
-		}
-	}
-	return
-}
-
-func addUint64(val *uint64, delta uint64) (new uint64) {
-	for {
-		old := *val
-		new = old + delta
-		if CompareAndSwapUint64(val, old, new) {
-			break
-		}
-	}
-	return
-}
-
-func swapUint64(addr *uint64, new uint64) (old uint64) {
-	for {
-		old = *addr
-		if CompareAndSwapUint64(addr, old, new) {
-			break
-		}
-	}
-	return
-}
-
-// Additional ARM-specific assembly routines.
-// Declaration here to give assembly routines correct stack maps for arguments.
-func armCompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)
-func armCompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)
-func generalCAS64(addr *uint64, old, new uint64) (swapped bool)
-func armAddUint32(addr *uint32, delta uint32) (new uint32)
-func armAddUint64(addr *uint64, delta uint64) (new uint64)
-func armSwapUint32(addr *uint32, new uint32) (old uint32)
-func armSwapUint64(addr *uint64, new uint64) (old uint64)
-func armLoadUint64(addr *uint64) (val uint64)
-func armStoreUint64(addr *uint64, val uint64)
diff --git a/libgo/go/sync/atomic/example_test.go b/libgo/go/sync/atomic/example_test.go
new file mode 100644
index 0000000..09ae0aa
--- /dev/null
+++ b/libgo/go/sync/atomic/example_test.go
@@ -0,0 +1,76 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic_test
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+func loadConfig() map[string]string {
+	return make(map[string]string)
+}
+
+func requests() chan int {
+	return make(chan int)
+}
+
+// The following example shows how to use Value for periodic program config updates
+// and propagation of the changes to worker goroutines.
+func ExampleValue_config() {
+	var config atomic.Value // holds current server configuration
+	// Create initial config value and store into config.
+	config.Store(loadConfig())
+	go func() {
+		// Reload config every 10 seconds
+		// and update config value with the new version.
+		for {
+			time.Sleep(10 * time.Second)
+			config.Store(loadConfig())
+		}
+	}()
+	// Create worker goroutines that handle incoming requests
+	// using the latest config value.
+	for i := 0; i < 10; i++ {
+		go func() {
+			for r := range requests() {
+				c := config.Load()
+				// Handle request r using config c.
+				_, _ = r, c
+			}
+		}()
+	}
+}
+
+// The following example shows how to maintain a scalable frequently read,
+// but infrequently updated data structure using copy-on-write idiom.
+func ExampleValue_readMostly() {
+	type Map map[string]string
+	var m atomic.Value
+	m.Store(make(Map))
+	var mu sync.Mutex // used only by writers
+	// read function can be used to read the data without further synchronization
+	read := func(key string) (val string) {
+		m1 := m.Load().(Map)
+		return m1[key]
+	}
+	// insert function can be used to update the data without further synchronization
+	insert := func(key, val string) {
+		mu.Lock() // synchronize with other potential writers
+		defer mu.Unlock()
+		m1 := m.Load().(Map) // load current value of the data structure
+		m2 := make(Map)      // create a new value
+		for k, v := range m1 {
+			m2[k] = v // copy all data from the current object to the new one
+		}
+		m2[key] = val // do the update that we need
+		m.Store(m2)   // atomically replace the current object with the new one
+		// At this point all new readers start working with the new version.
+		// The old version will be garbage collected once the existing readers
+		// (if any) are done with it.
+	}
+	_, _ = read, insert
+}
diff --git a/libgo/go/sync/atomic/value_test.go b/libgo/go/sync/atomic/value_test.go
index fd90451..fd69ba3 100644
--- a/libgo/go/sync/atomic/value_test.go
+++ b/libgo/go/sync/atomic/value_test.go
@@ -7,10 +7,8 @@ package atomic_test
 import (
 	"math/rand"
 	"runtime"
-	"sync"
 	. "sync/atomic"
 	"testing"
-	"time"
 )
 
 func TestValue(t *testing.T) {
@@ -133,68 +131,3 @@ func BenchmarkValueRead(b *testing.B) {
 		}
 	})
 }
-
-// The following example shows how to use Value for periodic program config updates
-// and propagation of the changes to worker goroutines.
-func ExampleValue_config() {
-	var config Value // holds current server configuration
-	// Create initial config value and store into config.
-	config.Store(loadConfig())
-	go func() {
-		// Reload config every 10 seconds
-		// and update config value with the new version.
-		for {
-			time.Sleep(10 * time.Second)
-			config.Store(loadConfig())
-		}
-	}()
-	// Create worker goroutines that handle incoming requests
-	// using the latest config value.
-	for i := 0; i < 10; i++ {
-		go func() {
-			for r := range requests() {
-				c := config.Load()
-				// Handle request r using config c.
-				_, _ = r, c
-			}
-		}()
-	}
-}
-
-func loadConfig() map[string]string {
-	return make(map[string]string)
-}
-
-func requests() chan int {
-	return make(chan int)
-}
-
-// The following example shows how to maintain a scalable frequently read,
-// but infrequently updated data structure using copy-on-write idiom.
-func ExampleValue_readMostly() {
-	type Map map[string]string
-	var m Value
-	m.Store(make(Map))
-	var mu sync.Mutex // used only by writers
-	// read function can be used to read the data without further synchronization
-	read := func(key string) (val string) {
-		m1 := m.Load().(Map)
-		return m1[key]
-	}
-	// insert function can be used to update the data without further synchronization
-	insert := func(key, val string) {
-		mu.Lock() // synchronize with other potential writers
-		defer mu.Unlock()
-		m1 := m.Load().(Map) // load current value of the data structure
-		m2 := make(Map)      // create a new value
-		for k, v := range m1 {
-			m2[k] = v // copy all data from the current object to the new one
-		}
-		m2[key] = val // do the update that we need
-		m.Store(m2)   // atomically replace the current object with the new one
-		// At this point all new readers start working with the new version.
-		// The old version will be garbage collected once the existing readers
-		// (if any) are done with it.
-	}
-	_, _ = read, insert
-}
diff --git a/libgo/go/sync/cond.go b/libgo/go/sync/cond.go
index 3dcbf1c..b254c93 100644
--- a/libgo/go/sync/cond.go
+++ b/libgo/go/sync/cond.go
@@ -94,4 +94,5 @@ func (c *copyChecker) check() {
 type noCopy struct{}
 
 // Lock is a no-op used by -copylocks checker from `go vet`.
-func (*noCopy) Lock() {}
+func (*noCopy) Lock()   {}
+func (*noCopy) Unlock() {}
diff --git a/libgo/go/sync/cond_test.go b/libgo/go/sync/cond_test.go
index 9019f8f..9d0d9ad 100644
--- a/libgo/go/sync/cond_test.go
+++ b/libgo/go/sync/cond_test.go
@@ -4,9 +4,9 @@
 package sync_test
 
 import (
-	. "sync"
-
+	"reflect"
 	"runtime"
+	. "sync"
 	"testing"
 	"time"
 )
@@ -251,7 +251,8 @@ func TestCondCopy(t *testing.T) {
 	}()
 	c := Cond{L: &Mutex{}}
 	c.Signal()
-	c2 := c
+	var c2 Cond
+	reflect.ValueOf(&c2).Elem().Set(reflect.ValueOf(&c).Elem()) // c2 := c, hidden from vet
 	c2.Signal()
 }
 
diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go
index 4e9e819..16a2f92 100644
--- a/libgo/go/sync/rwmutex.go
+++ b/libgo/go/sync/rwmutex.go
@@ -47,7 +47,7 @@ func (rw *RWMutex) RLock() {
 	}
 	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
 		// A writer is pending, wait for it.
-		runtime_Semacquire(&rw.readerSem)
+		runtime_SemacquireMutex(&rw.readerSem, false)
 	}
 	if race.Enabled {
 		race.Enable()
@@ -95,7 +95,7 @@ func (rw *RWMutex) Lock() {
 	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
 	// Wait for active readers.
 	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
-		runtime_Semacquire(&rw.writerSem)
+		runtime_SemacquireMutex(&rw.writerSem, false)
 	}
 	if race.Enabled {
 		race.Enable()
@@ -114,7 +114,6 @@ func (rw *RWMutex) Unlock() {
 	if race.Enabled {
 		_ = rw.w.state
 		race.Release(unsafe.Pointer(&rw.readerSem))
-		race.Release(unsafe.Pointer(&rw.writerSem))
 		race.Disable()
 	}
 
diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go
index 2fa7c3e..99dd400 100644
--- a/libgo/go/sync/waitgroup.go
+++ b/libgo/go/sync/waitgroup.go
@@ -23,16 +23,17 @@ type WaitGroup struct {
 	// 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
 	// 64-bit atomic operations require 64-bit alignment, but 32-bit
 	// compilers do not ensure it. So we allocate 12 bytes and then use
-	// the aligned 8 bytes in them as state.
-	state1 [12]byte
-	sema   uint32
+	// the aligned 8 bytes in them as state, and the other 4 as storage
+	// for the sema.
+	state1 [3]uint32
 }
 
-func (wg *WaitGroup) state() *uint64 {
+// state returns pointers to the state and sema fields stored within wg.state1.
+func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
 	if uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
-		return (*uint64)(unsafe.Pointer(&wg.state1))
+		return (*uint64)(unsafe.Pointer(&wg.state1)), &wg.state1[2]
 	} else {
-		return (*uint64)(unsafe.Pointer(&wg.state1[4]))
+		return (*uint64)(unsafe.Pointer(&wg.state1[1])), &wg.state1[0]
 	}
 }
 
@@ -50,7 +51,7 @@ func (wg *WaitGroup) state() *uint64 {
 // new Add calls must happen after all previous Wait calls have returned.
 // See the WaitGroup example.
 func (wg *WaitGroup) Add(delta int) {
-	statep := wg.state()
+	statep, semap := wg.state()
 	if race.Enabled {
 		_ = *statep // trigger nil deref early
 		if delta < 0 {
@@ -67,7 +68,7 @@ func (wg *WaitGroup) Add(delta int) {
 		// The first increment must be synchronized with Wait.
 		// Need to model this as a read, because there can be
 		// several concurrent wg.counter transitions from 0.
-		race.Read(unsafe.Pointer(&wg.sema))
+		race.Read(unsafe.Pointer(semap))
 	}
 	if v < 0 {
 		panic("sync: negative WaitGroup counter")
 	}
@@ -89,7 +90,7 @@ func (wg *WaitGroup) Add(delta int) {
 	// Reset waiters count to 0.
 	*statep = 0
 	for ; w != 0; w-- {
-		runtime_Semrelease(&wg.sema, false)
+		runtime_Semrelease(semap, false)
 	}
 }
 
@@ -100,7 +101,7 @@ func (wg *WaitGroup) Done() {
 
 // Wait blocks until the WaitGroup counter is zero.
 func (wg *WaitGroup) Wait() {
-	statep := wg.state()
+	statep, semap := wg.state()
 	if race.Enabled {
 		_ = *statep // trigger nil deref early
 		race.Disable()
@@ -124,9 +125,9 @@ func (wg *WaitGroup) Wait() {
 			// Need to model this is as a write to race with the read in Add.
 			// As a consequence, can do the write only for the first waiter,
 			// otherwise concurrent Waits will race with each other.
-			race.Write(unsafe.Pointer(&wg.sema))
+			race.Write(unsafe.Pointer(semap))
 		}
-		runtime_Semacquire(&wg.sema)
+		runtime_Semacquire(semap)
 		if *statep != 0 {
 			panic("sync: WaitGroup is reused before previous Wait has returned")
 		}
diff --git a/libgo/go/sync/waitgroup_test.go b/libgo/go/sync/waitgroup_test.go
index e3e3096..4ab438c 100644
--- a/libgo/go/sync/waitgroup_test.go
+++ b/libgo/go/sync/waitgroup_test.go
@@ -68,6 +68,21 @@ func TestWaitGroupMisuse(t *testing.T) {
 	t.Fatal("Should panic")
 }
 
+// pollUntilEqual blocks until v, loaded atomically, is
+// equal to the target.
+func pollUntilEqual(v *uint32, target uint32) {
+	for {
+		for i := 0; i < 1e3; i++ {
+			if atomic.LoadUint32(v) == target {
+				return
+			}
+		}
+		// yield to avoid deadlock with the garbage collector
+		// see issue #20072
+		runtime.Gosched()
+	}
+}
+
 func TestWaitGroupMisuse2(t *testing.T) {
 	knownRacy(t)
 	if runtime.NumCPU() <= 4 {
@@ -94,9 +109,7 @@ func TestWaitGroupMisuse2(t *testing.T) {
 			done <- recover()
 		}()
 		atomic.AddUint32(&here, 1)
-		for atomic.LoadUint32(&here) != 3 {
-			// spin
-		}
+		pollUntilEqual(&here, 3)
 		wg.Wait()
 	}()
 	go func() {
@@ -104,16 +117,12 @@ func TestWaitGroupMisuse2(t *testing.T) {
 			done <- recover()
 		}()
 		atomic.AddUint32(&here, 1)
-		for atomic.LoadUint32(&here) != 3 {
-			// spin
-		}
+		pollUntilEqual(&here, 3)
 		wg.Add(1) // This is the bad guy.
 		wg.Done()
 	}()
 	atomic.AddUint32(&here, 1)
-	for atomic.LoadUint32(&here) != 3 {
-		// spin
-	}
+	pollUntilEqual(&here, 3)
 	wg.Done()
 	for j := 0; j < 2; j++ {
 		if err := <-done; err != nil {
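A note on the cond.go hunk above: with Unlock added, *noCopy satisfies the full sync.Locker interface, which is what the -copylocks analysis in `go vet` keys on; this also explains the cond_test.go change, which uses reflect to copy a Cond without vet noticing. A minimal sketch of the embedding pattern as I read it (the Container type and main function are illustrative, not part of the patch):

package main

// noCopy is a zero-size marker. Because *noCopy has both Lock and Unlock,
// `go vet`'s -copylocks check flags any value copy of a struct containing it.
type noCopy struct{}

func (*noCopy) Lock()   {}
func (*noCopy) Unlock() {}

// Container is a hypothetical type that must not be copied after first use.
type Container struct {
	noCopy noCopy // no runtime effect; exists only for `go vet`
	data   []int
}

func main() {
	var c Container
	c2 := c // `go vet` reports: assignment copies lock value to c2
	_ = c2
}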
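The waitgroup.go comment explains why the state is packed into 12 bytes: 64-bit atomics need 8-byte-aligned addresses, and 32-bit targets do not guarantee that for struct fields. A small demonstration of the underlying constraint (my own sketch, not from the patch; the counter struct is hypothetical):

package main

import "sync/atomic"

type counter struct {
	a uint32
	n uint64 // offset 4: misaligned within an 8-byte-aligned allocation on 32-bit targets
}

func main() {
	c := new(counter) // the first word of an allocated struct is 64-bit aligned
	// Fine on 64-bit targets; on GOARCH=386 or arm this panics with
	// "unaligned 64-bit atomic operation" - the failure the WaitGroup
	// [3]uint32 layout is designed to avoid.
	atomic.AddUint64(&c.n, 1)
}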
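And the new state() selection logic itself, lifted into a standalone program to show how the same 12 bytes serve both roles depending on where the array lands in memory (a sketch mirroring the patch; the wg type name is mine):

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

// wg mirrors the new WaitGroup layout: 12 bytes, of which the 8-byte-aligned
// 8 hold the 64-bit state word and the remaining 4 hold the semaphore.
type wg struct {
	state1 [3]uint32
}

func (w *wg) state() (statep *uint64, semap *uint32) {
	if uintptr(unsafe.Pointer(&w.state1))%8 == 0 {
		// Array starts 8-byte aligned: state in words 0-1, sema in word 2.
		return (*uint64)(unsafe.Pointer(&w.state1)), &w.state1[2]
	}
	// Array starts only 4-byte aligned: sema in word 0, state in words 1-2.
	return (*uint64)(unsafe.Pointer(&w.state1[1])), &w.state1[0]
}

func main() {
	w := new(wg)
	statep, semap := w.state()
	atomic.AddUint64(statep, 1<<32)  // high 32 bits hold the counter
	fmt.Println(*statep>>32, *semap) // prints: 1 0
}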