Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/atomic/atomic_test.go  |   8
-rw-r--r--  libgo/go/sync/atomic/value.go        |  23
-rw-r--r--  libgo/go/sync/atomic/value_test.go   |  18
-rw-r--r--  libgo/go/sync/example_pool_test.go   |   2
-rw-r--r--  libgo/go/sync/example_test.go        |   2
-rw-r--r--  libgo/go/sync/export_test.go         |  18
-rw-r--r--  libgo/go/sync/map.go                 |  45
-rw-r--r--  libgo/go/sync/map_bench_test.go      |   4
-rw-r--r--  libgo/go/sync/map_reference_test.go  |  58
-rw-r--r--  libgo/go/sync/map_test.go            |  72
-rw-r--r--  libgo/go/sync/mutex.go               |  24
-rw-r--r--  libgo/go/sync/mutex_test.go          |  18
-rw-r--r--  libgo/go/sync/pool.go                |  18
-rw-r--r--  libgo/go/sync/pool_test.go           |  33
-rw-r--r--  libgo/go/sync/poolqueue.go           |  18
-rw-r--r--  libgo/go/sync/runtime2.go            |   1
-rw-r--r--  libgo/go/sync/runtime2_lockrank.go   |   1
-rw-r--r--  libgo/go/sync/rwmutex.go             |  59
-rw-r--r--  libgo/go/sync/rwmutex_test.go        |  28
-rw-r--r--  libgo/go/sync/waitgroup.go           |  22
-rw-r--r--  libgo/go/sync/waitgroup_test.go      | 126
21 files changed, 341 insertions, 257 deletions
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
index eadc962..8a53094 100644
--- a/libgo/go/sync/atomic/atomic_test.go
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -7,6 +7,7 @@ package atomic_test
 import (
 	"fmt"
 	"runtime"
+	"runtime/debug"
 	"strings"
 	. "sync/atomic"
 	"testing"
@@ -31,7 +32,7 @@ const (
 )
 
 // Do the 64-bit functions panic? If so, don't bother testing.
-var test64err = func() (err interface{}) {
+var test64err = func() (err any) {
 	defer func() {
 		err = recover()
 	}()
@@ -1196,6 +1197,11 @@ func TestHammerStoreLoad(t *testing.T) {
 	}
 	const procs = 8
 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
+	// Disable the GC because hammerStoreLoadPointer invokes
+	// write barriers on values that aren't real pointers.
+	defer debug.SetGCPercent(debug.SetGCPercent(-1))
+	// Ensure any in-progress GC is finished.
+	runtime.GC()
 	for _, tt := range tests {
 		c := make(chan int)
 		var val uint64
diff --git a/libgo/go/sync/atomic/value.go b/libgo/go/sync/atomic/value.go
index 3500cd2..f18b7ee 100644
--- a/libgo/go/sync/atomic/value.go
+++ b/libgo/go/sync/atomic/value.go
@@ -14,7 +14,7 @@ import (
 //
 // A Value must not be copied after first use.
 type Value struct {
-	v interface{}
+	v any
 }
 
 // ifaceWords is interface{} internal representation.
@@ -25,10 +25,10 @@ type ifaceWords struct {
 
 // Load returns the value set by the most recent Store.
 // It returns nil if there has been no call to Store for this Value.
-func (v *Value) Load() (val interface{}) {
+func (v *Value) Load() (val any) {
 	vp := (*ifaceWords)(unsafe.Pointer(v))
 	typ := LoadPointer(&vp.typ)
-	if typ == nil || uintptr(typ) == ^uintptr(0) {
+	if typ == nil || typ == unsafe.Pointer(&firstStoreInProgress) {
 		// First store not yet completed.
 		return nil
 	}
@@ -39,10 +39,12 @@ func (v *Value) Load() (val interface{}) {
 	return
 }
 
+var firstStoreInProgress byte
+
 // Store sets the value of the Value to x.
 // All calls to Store for a given Value must use values of the same concrete type.
 // Store of an inconsistent type panics, as does Store(nil).
-func (v *Value) Store(val interface{}) {
+func (v *Value) Store(val any) {
 	if val == nil {
 		panic("sync/atomic: store of nil value into Value")
 	}
@@ -53,10 +55,9 @@ func (v *Value) Store(val interface{}) {
 		if typ == nil {
 			// Attempt to start first store.
 			// Disable preemption so that other goroutines can use
-			// active spin wait to wait for completion; and so that
-			// GC does not see the fake type accidentally.
+			// active spin wait to wait for completion.
 			runtime_procPin()
-			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(^uintptr(0))) {
+			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(&firstStoreInProgress)) {
 				runtime_procUnpin()
 				continue
 			}
@@ -66,7 +67,7 @@ func (v *Value) Store(val interface{}) {
 			runtime_procUnpin()
 			return
 		}
-		if uintptr(typ) == ^uintptr(0) {
+		if typ == unsafe.Pointer(&firstStoreInProgress) {
 			// First store in progress. Wait.
 			// Since we disable preemption around the first store,
 			// we can wait with active spinning.
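The sentinel change above is internal to atomic.Value; the exported API is unchanged. For orientation, a minimal standalone sketch of typical usage (the config name and the map payload are illustrative, not part of this patch):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var config atomic.Value

	// The first Store wins the CAS on the type word; subsequent Stores
	// of the same concrete type only swap the data word.
	config.Store(map[string]string{"addr": "localhost"})

	// Loads never block once the first Store has completed. Treat the
	// stored value as immutable; replace it wholesale to update.
	if m, ok := config.Load().(map[string]string); ok {
		fmt.Println(m["addr"]) // localhost
	}
}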
@@ -86,7 +87,7 @@ func (v *Value) Store(val interface{}) {
 //
 // All calls to Swap for a given Value must use values of the same concrete
 // type. Swap of an inconsistent type panics, as does Swap(nil).
-func (v *Value) Swap(new interface{}) (old interface{}) {
+func (v *Value) Swap(new any) (old any) {
 	if new == nil {
 		panic("sync/atomic: swap of nil value into Value")
 	}
@@ -131,7 +132,7 @@ func (v *Value) Swap(new interface{}) (old interface{}) {
 // All calls to CompareAndSwap for a given Value must use values of the same
 // concrete type. CompareAndSwap of an inconsistent type panics, as does
 // CompareAndSwap(old, nil).
-func (v *Value) CompareAndSwap(old, new interface{}) (swapped bool) {
+func (v *Value) CompareAndSwap(old, new any) (swapped bool) {
 	if new == nil {
 		panic("sync/atomic: compare and swap of nil value into Value")
 	}
@@ -178,7 +179,7 @@ func (v *Value) CompareAndSwap(old, new interface{}) (swapped bool) {
 			// CompareAndSwapPointer below only ensures vp.data
 			// has not changed since LoadPointer.
 			data := LoadPointer(&vp.data)
-			var i interface{}
+			var i any
 			(*ifaceWords)(unsafe.Pointer(&i)).typ = typ
 			(*ifaceWords)(unsafe.Pointer(&i)).data = data
 			if i != old {
diff --git a/libgo/go/sync/atomic/value_test.go b/libgo/go/sync/atomic/value_test.go
index a5e717d..721da96 100644
--- a/libgo/go/sync/atomic/value_test.go
+++ b/libgo/go/sync/atomic/value_test.go
@@ -80,7 +80,7 @@ func TestValuePanic(t *testing.T) {
 }
 
 func TestValueConcurrent(t *testing.T) {
-	tests := [][]interface{}{
+	tests := [][]any{
 		{uint16(0), ^uint16(0), uint16(1 + 2<<8), uint16(3 + 4<<8)},
 		{uint32(0), ^uint32(0), uint32(1 + 2<<16), uint32(3 + 4<<16)},
 		{uint64(0), ^uint64(0), uint64(1 + 2<<32), uint64(3 + 4<<32)},
@@ -138,10 +138,10 @@ func BenchmarkValueRead(b *testing.B) {
 }
 
 var Value_SwapTests = []struct {
-	init interface{}
-	new  interface{}
-	want interface{}
-	err  interface{}
+	init any
+	new  any
+	want any
+	err  any
}{
 	{init: nil, new: nil, err: "sync/atomic: swap of nil value into Value"},
 	{init: nil, new: true, want: nil, err: nil},
@@ -207,11 +207,11 @@ func TestValueSwapConcurrent(t *testing.T) {
 var heapA, heapB = struct{ uint }{0}, struct{ uint }{0}
 
 var Value_CompareAndSwapTests = []struct {
-	init interface{}
-	new  interface{}
-	old  interface{}
+	init any
+	new  any
+	old  any
 	want bool
-	err  interface{}
+	err  any
 }{
 	{init: nil, new: nil, old: nil, err: "sync/atomic: compare and swap of nil value into Value"},
 	{init: nil, new: true, old: "", err: "sync/atomic: compare and swap of inconsistently typed values into Value"},
diff --git a/libgo/go/sync/example_pool_test.go b/libgo/go/sync/example_pool_test.go
index 8288d41..2fb4c1e 100644
--- a/libgo/go/sync/example_pool_test.go
+++ b/libgo/go/sync/example_pool_test.go
@@ -13,7 +13,7 @@ import (
 )
 
 var bufPool = sync.Pool{
-	New: func() interface{} {
+	New: func() any {
 		// The Pool's New function should generally only return pointer
 		// types, since a pointer can be put into the return interface
 		// value without an allocation:
diff --git a/libgo/go/sync/example_test.go b/libgo/go/sync/example_test.go
index bdd3af6..f009a68 100644
--- a/libgo/go/sync/example_test.go
+++ b/libgo/go/sync/example_test.go
@@ -22,7 +22,7 @@ func ExampleWaitGroup() {
 	var urls = []string{
 		"http://www.golang.org/",
 		"http://www.google.com/",
-		"http://www.somestupidname.com/",
+		"http://www.example.com/",
 	}
 	for _, url := range urls {
 		// Increment the WaitGroup counter.
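Swap and CompareAndSwap (added to Value in Go 1.17) enable lock-free read-modify-write cycles. A small sketch, assuming all stores use the comparable type int (illustrative, not from this patch):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var v atomic.Value
	v.Store(0)

	// Classic CAS retry loop: reload and retry if another goroutine
	// won the race between Load and CompareAndSwap.
	for {
		old := v.Load().(int)
		if v.CompareAndSwap(old, old+1) {
			break
		}
	}
	fmt.Println(v.Load()) // 1
}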
diff --git a/libgo/go/sync/export_test.go b/libgo/go/sync/export_test.go
index ffbe567..c020ef7 100644
--- a/libgo/go/sync/export_test.go
+++ b/libgo/go/sync/export_test.go
@@ -12,9 +12,9 @@ var Runtime_procUnpin = runtime_procUnpin
 
 // poolDequeue testing.
 type PoolDequeue interface {
-	PushHead(val interface{}) bool
-	PopHead() (interface{}, bool)
-	PopTail() (interface{}, bool)
+	PushHead(val any) bool
+	PopHead() (any, bool)
+	PopTail() (any, bool)
 }
 
 func NewPoolDequeue(n int) PoolDequeue {
@@ -27,15 +27,15 @@ func NewPoolDequeue(n int) PoolDequeue {
 	return d
 }
 
-func (d *poolDequeue) PushHead(val interface{}) bool {
+func (d *poolDequeue) PushHead(val any) bool {
 	return d.pushHead(val)
 }
 
-func (d *poolDequeue) PopHead() (interface{}, bool) {
+func (d *poolDequeue) PopHead() (any, bool) {
 	return d.popHead()
 }
 
-func (d *poolDequeue) PopTail() (interface{}, bool) {
+func (d *poolDequeue) PopTail() (any, bool) {
 	return d.popTail()
 }
 
@@ -43,15 +43,15 @@ func NewPoolChain() PoolDequeue {
 	return new(poolChain)
 }
 
-func (c *poolChain) PushHead(val interface{}) bool {
+func (c *poolChain) PushHead(val any) bool {
 	c.pushHead(val)
 	return true
 }
 
-func (c *poolChain) PopHead() (interface{}, bool) {
+func (c *poolChain) PopHead() (any, bool) {
 	return c.popHead()
 }
 
-func (c *poolChain) PopTail() (interface{}, bool) {
+func (c *poolChain) PopTail() (any, bool) {
 	return c.popTail()
 }
diff --git a/libgo/go/sync/map.go b/libgo/go/sync/map.go
index dfb62dd..2fa3253 100644
--- a/libgo/go/sync/map.go
+++ b/libgo/go/sync/map.go
@@ -48,7 +48,7 @@ type Map struct {
 	//
 	// If the dirty map is nil, the next write to the map will initialize it by
 	// making a shallow copy of the clean map, omitting stale entries.
-	dirty map[interface{}]*entry
+	dirty map[any]*entry
 
 	// misses counts the number of loads since the read map was last updated that
 	// needed to lock mu to determine whether the key was present.
@@ -61,13 +61,13 @@ type Map struct {
 
 // readOnly is an immutable struct stored atomically in the Map.read field.
 type readOnly struct {
-	m       map[interface{}]*entry
+	m       map[any]*entry
 	amended bool // true if the dirty map contains some key not in m.
 }
 
 // expunged is an arbitrary pointer that marks entries which have been deleted
 // from the dirty map.
-var expunged = unsafe.Pointer(new(interface{}))
+var expunged = unsafe.Pointer(new(any))
 
 // An entry is a slot in the map corresponding to a particular key.
 type entry struct {
@@ -93,14 +93,14 @@ type entry struct {
 	p unsafe.Pointer // *interface{}
 }
 
-func newEntry(i interface{}) *entry {
+func newEntry(i any) *entry {
 	return &entry{p: unsafe.Pointer(&i)}
 }
 
 // Load returns the value stored in the map for a key, or nil if no
 // value is present.
 // The ok result indicates whether value was found in the map.
-func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
+func (m *Map) Load(key any) (value any, ok bool) {
 	read, _ := m.read.Load().(readOnly)
 	e, ok := read.m[key]
 	if !ok && read.amended {
@@ -125,16 +125,16 @@ func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
 	return e.load()
 }
 
-func (e *entry) load() (value interface{}, ok bool) {
+func (e *entry) load() (value any, ok bool) {
 	p := atomic.LoadPointer(&e.p)
 	if p == nil || p == expunged {
 		return nil, false
 	}
-	return *(*interface{})(p), true
+	return *(*any)(p), true
 }
 
 // Store sets the value for a key.
-func (m *Map) Store(key, value interface{}) {
+func (m *Map) Store(key, value any) {
 	read, _ := m.read.Load().(readOnly)
 	if e, ok := read.m[key]; ok && e.tryStore(&value) {
 		return
@@ -167,7 +167,7 @@ func (m *Map) Store(key, value interface{}) {
 //
 // If the entry is expunged, tryStore returns false and leaves the entry
 // unchanged.
-func (e *entry) tryStore(i *interface{}) bool {
+func (e *entry) tryStore(i *any) bool {
 	for {
 		p := atomic.LoadPointer(&e.p)
 		if p == expunged {
@@ -190,14 +190,14 @@ func (e *entry) unexpungeLocked() (wasExpunged bool) {
 
 // storeLocked unconditionally stores a value to the entry.
 //
 // The entry must be known not to be expunged.
-func (e *entry) storeLocked(i *interface{}) {
+func (e *entry) storeLocked(i *any) {
 	atomic.StorePointer(&e.p, unsafe.Pointer(i))
 }
 
 // LoadOrStore returns the existing value for the key if present.
 // Otherwise, it stores and returns the given value.
 // The loaded result is true if the value was loaded, false if stored.
-func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
+func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
 	// Avoid locking if it's a clean hit.
 	read, _ := m.read.Load().(readOnly)
 	if e, ok := read.m[key]; ok {
@@ -237,13 +237,13 @@ func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bo
 //
 // If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
 // returns with ok==false.
-func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
+func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
 	p := atomic.LoadPointer(&e.p)
 	if p == expunged {
 		return nil, false, false
 	}
 	if p != nil {
-		return *(*interface{})(p), true, true
+		return *(*any)(p), true, true
 	}
 
 	// Copy the interface after the first load to make this method more amenable
@@ -259,14 +259,14 @@ func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bo
 			return nil, false, false
 		}
 		if p != nil {
-			return *(*interface{})(p), true, true
+			return *(*any)(p), true, true
 		}
 	}
 }
 
 // LoadAndDelete deletes the value for a key, returning the previous value if any.
 // The loaded result reports whether the key was present.
-func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
+func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
 	read, _ := m.read.Load().(readOnly)
 	e, ok := read.m[key]
 	if !ok && read.amended {
@@ -290,18 +290,18 @@ func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
 }
 
 // Delete deletes the value for a key.
-func (m *Map) Delete(key interface{}) {
+func (m *Map) Delete(key any) {
 	m.LoadAndDelete(key)
 }
 
-func (e *entry) delete() (value interface{}, ok bool) {
+func (e *entry) delete() (value any, ok bool) {
 	for {
 		p := atomic.LoadPointer(&e.p)
 		if p == nil || p == expunged {
 			return nil, false
 		}
 		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
-			return *(*interface{})(p), true
+			return *(*any)(p), true
 		}
 	}
 }
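LoadOrStore and LoadAndDelete above are sync.Map's atomic check-then-act primitives. A minimal usage sketch (the key and value are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map

	// LoadOrStore returns the existing value (loaded == true),
	// or stores and returns the given one (loaded == false).
	actual, loaded := m.LoadOrStore("k", 1)
	fmt.Println(actual, loaded) // 1 false

	// LoadAndDelete removes the key and reports whether it was present.
	v, loaded := m.LoadAndDelete("k")
	fmt.Println(v, loaded) // 1 true
}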
@@ -311,12 +311,13 @@ func (e *entry) delete() (value interface{}, ok bool) {
 //
 // Range does not necessarily correspond to any consistent snapshot of the Map's
 // contents: no key will be visited more than once, but if the value for any key
-// is stored or deleted concurrently, Range may reflect any mapping for that key
-// from any point during the Range call.
+// is stored or deleted concurrently (including by f), Range may reflect any
+// mapping for that key from any point during the Range call. Range does not
+// block other methods on the receiver; even f itself may call any method on m.
 //
 // Range may be O(N) with the number of elements in the map even if f returns
 // false after a constant number of calls.
-func (m *Map) Range(f func(key, value interface{}) bool) {
+func (m *Map) Range(f func(key, value any) bool) {
 	// We need to be able to iterate over all of the keys that were already
 	// present at the start of the call to Range.
 	// If read.amended is false, then read.m satisfies that property without
@@ -365,7 +366,7 @@ func (m *Map) dirtyLocked() {
 	}
 
 	read, _ := m.read.Load().(readOnly)
-	m.dirty = make(map[interface{}]*entry, len(read.m))
+	m.dirty = make(map[any]*entry, len(read.m))
 	for k, e := range read.m {
 		if !e.tryExpungeLocked() {
 			m.dirty[k] = e
diff --git a/libgo/go/sync/map_bench_test.go b/libgo/go/sync/map_bench_test.go
index cf0a3d7..e7b0e60 100644
--- a/libgo/go/sync/map_bench_test.go
+++ b/libgo/go/sync/map_bench_test.go
@@ -216,7 +216,7 @@ func BenchmarkRange(b *testing.B) {
 
 		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
 			for ; pb.Next(); i++ {
-				m.Range(func(_, _ interface{}) bool { return true })
+				m.Range(func(_, _ any) bool { return true })
 			}
 		},
 	})
@@ -263,7 +263,7 @@ func BenchmarkAdversarialDelete(b *testing.B) {
 				m.Load(i)
 
 				if i%mapSize == 0 {
-					m.Range(func(k, _ interface{}) bool {
+					m.Range(func(k, _ any) bool {
 						m.Delete(k)
 						return false
 					})
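The documentation change above makes explicit that Range holds no lock across calls to f, so f may call back into the map. A sketch of the now-documented delete-during-iteration pattern (illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var m sync.Map
	for i := 0; i < 3; i++ {
		m.Store(i, i*i)
	}

	// f may call methods on m, including Delete and a nested Range.
	m.Range(func(k, v any) bool {
		m.Delete(k)
		return true
	})

	n := 0
	m.Range(func(k, v any) bool { n++; return true })
	fmt.Println(n) // 0
}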
diff --git a/libgo/go/sync/map_reference_test.go b/libgo/go/sync/map_reference_test.go
index d105a24..1122b40 100644
--- a/libgo/go/sync/map_reference_test.go
+++ b/libgo/go/sync/map_reference_test.go
@@ -13,43 +13,43 @@ import (
 
 // mapInterface is the interface Map implements.
 type mapInterface interface {
-	Load(interface{}) (interface{}, bool)
-	Store(key, value interface{})
-	LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
-	LoadAndDelete(key interface{}) (value interface{}, loaded bool)
-	Delete(interface{})
-	Range(func(key, value interface{}) (shouldContinue bool))
+	Load(any) (any, bool)
+	Store(key, value any)
+	LoadOrStore(key, value any) (actual any, loaded bool)
+	LoadAndDelete(key any) (value any, loaded bool)
+	Delete(any)
+	Range(func(key, value any) (shouldContinue bool))
 }
 
 // RWMutexMap is an implementation of mapInterface using a sync.RWMutex.
 type RWMutexMap struct {
 	mu    sync.RWMutex
-	dirty map[interface{}]interface{}
+	dirty map[any]any
 }
 
-func (m *RWMutexMap) Load(key interface{}) (value interface{}, ok bool) {
+func (m *RWMutexMap) Load(key any) (value any, ok bool) {
 	m.mu.RLock()
 	value, ok = m.dirty[key]
 	m.mu.RUnlock()
 	return
 }
 
-func (m *RWMutexMap) Store(key, value interface{}) {
+func (m *RWMutexMap) Store(key, value any) {
 	m.mu.Lock()
 	if m.dirty == nil {
-		m.dirty = make(map[interface{}]interface{})
+		m.dirty = make(map[any]any)
 	}
 	m.dirty[key] = value
 	m.mu.Unlock()
 }
 
-func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
+func (m *RWMutexMap) LoadOrStore(key, value any) (actual any, loaded bool) {
 	m.mu.Lock()
 	actual, loaded = m.dirty[key]
 	if !loaded {
 		actual = value
 		if m.dirty == nil {
-			m.dirty = make(map[interface{}]interface{})
+			m.dirty = make(map[any]any)
 		}
 		m.dirty[key] = value
 	}
@@ -57,7 +57,7 @@ func (m *RWMutexMap) LoadOrStore(key, value interface{}) (actual interface{}, lo
 	return actual, loaded
 }
 
-func (m *RWMutexMap) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
+func (m *RWMutexMap) LoadAndDelete(key any) (value any, loaded bool) {
 	m.mu.Lock()
 	value, loaded = m.dirty[key]
 	if !loaded {
@@ -69,15 +69,15 @@ func (m *RWMutexMap) LoadAndDelete(key interface{}) (value interface{}, loaded b
 	return value, loaded
 }
 
-func (m *RWMutexMap) Delete(key interface{}) {
+func (m *RWMutexMap) Delete(key any) {
 	m.mu.Lock()
 	delete(m.dirty, key)
 	m.mu.Unlock()
 }
 
-func (m *RWMutexMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
+func (m *RWMutexMap) Range(f func(key, value any) (shouldContinue bool)) {
 	m.mu.RLock()
-	keys := make([]interface{}, 0, len(m.dirty))
+	keys := make([]any, 0, len(m.dirty))
 	for k := range m.dirty {
 		keys = append(keys, k)
 	}
@@ -102,13 +102,13 @@ type DeepCopyMap struct {
 	clean atomic.Value
 }
 
-func (m *DeepCopyMap) Load(key interface{}) (value interface{}, ok bool) {
-	clean, _ := m.clean.Load().(map[interface{}]interface{})
+func (m *DeepCopyMap) Load(key any) (value any, ok bool) {
+	clean, _ := m.clean.Load().(map[any]any)
 	value, ok = clean[key]
 	return value, ok
 }
 
-func (m *DeepCopyMap) Store(key, value interface{}) {
+func (m *DeepCopyMap) Store(key, value any) {
 	m.mu.Lock()
 	dirty := m.dirty()
 	dirty[key] = value
@@ -116,8 +116,8 @@ func (m *DeepCopyMap) Store(key, value interface{}) {
 	m.mu.Unlock()
 }
 
-func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
-	clean, _ := m.clean.Load().(map[interface{}]interface{})
+func (m *DeepCopyMap) LoadOrStore(key, value any) (actual any, loaded bool) {
+	clean, _ := m.clean.Load().(map[any]any)
 	actual, loaded = clean[key]
 	if loaded {
 		return actual, loaded
@@ -125,7 +125,7 @@ func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, l
 
 	m.mu.Lock()
 	// Reload clean in case it changed while we were waiting on m.mu.
-	clean, _ = m.clean.Load().(map[interface{}]interface{})
+	clean, _ = m.clean.Load().(map[any]any)
 	actual, loaded = clean[key]
 	if !loaded {
 		dirty := m.dirty()
@@ -137,7 +137,7 @@ func (m *DeepCopyMap) LoadOrStore(key, value interface{}) (actual interface{}, l
 	return actual, loaded
 }
 
-func (m *DeepCopyMap) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
+func (m *DeepCopyMap) LoadAndDelete(key any) (value any, loaded bool) {
 	m.mu.Lock()
 	dirty := m.dirty()
 	value, loaded = dirty[key]
@@ -147,7 +147,7 @@ func (m *DeepCopyMap) LoadAndDelete(key interface{}) (value interface{}, loaded
 	return
 }
 
-func (m *DeepCopyMap) Delete(key interface{}) {
+func (m *DeepCopyMap) Delete(key any) {
 	m.mu.Lock()
 	dirty := m.dirty()
 	delete(dirty, key)
@@ -155,8 +155,8 @@ func (m *DeepCopyMap) Delete(key interface{}) {
 	m.mu.Unlock()
 }
 
-func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)) {
-	clean, _ := m.clean.Load().(map[interface{}]interface{})
+func (m *DeepCopyMap) Range(f func(key, value any) (shouldContinue bool)) {
+	clean, _ := m.clean.Load().(map[any]any)
 	for k, v := range clean {
 		if !f(k, v) {
 			break
@@ -164,9 +164,9 @@ func (m *DeepCopyMap) Range(f func(key, value interface{}) (shouldContinue bool)
 	}
 }
 
-func (m *DeepCopyMap) dirty() map[interface{}]interface{} {
-	clean, _ := m.clean.Load().(map[interface{}]interface{})
-	dirty := make(map[interface{}]interface{}, len(clean)+1)
+func (m *DeepCopyMap) dirty() map[any]any {
+	clean, _ := m.clean.Load().(map[any]any)
+	dirty := make(map[any]any, len(clean)+1)
 	for k, v := range clean {
 		dirty[k] = v
 	}
diff --git a/libgo/go/sync/map_test.go b/libgo/go/sync/map_test.go
index 7f163ca..8352471 100644
--- a/libgo/go/sync/map_test.go
+++ b/libgo/go/sync/map_test.go
@@ -29,10 +29,10 @@ var mapOps = [...]mapOp{opLoad, opStore, opLoadOrStore, opLoadAndDelete, opDelet
 
 // mapCall is a quick.Generator for calls on mapInterface.
 type mapCall struct {
 	op   mapOp
-	k, v interface{}
+	k, v any
 }
 
-func (c mapCall) apply(m mapInterface) (interface{}, bool) {
+func (c mapCall) apply(m mapInterface) (any, bool) {
 	switch c.op {
 	case opLoad:
 		return m.Load(c.k)
@@ -52,11 +52,11 @@ func (c mapCall) apply(m mapInterface) (interface{}, bool) {
 }
 
 type mapResult struct {
-	value interface{}
+	value any
 	ok    bool
 }
 
-func randValue(r *rand.Rand) interface{} {
+func randValue(r *rand.Rand) any {
 	b := make([]byte, r.Intn(4))
 	for i := range b {
 		b[i] = 'a' + byte(rand.Intn(26))
@@ -73,14 +73,14 @@ func (mapCall) Generate(r *rand.Rand, size int) reflect.Value {
 	return reflect.ValueOf(c)
 }
 
-func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[interface{}]interface{}) {
+func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map[any]any) {
 	for _, c := range calls {
 		v, ok := c.apply(m)
 		results = append(results, mapResult{v, ok})
 	}
 
-	final = make(map[interface{}]interface{})
-	m.Range(func(k, v interface{}) bool {
+	final = make(map[any]any)
+	m.Range(func(k, v any) bool {
 		final[k] = v
 		return true
 	})
@@ -88,15 +88,15 @@ func applyCalls(m mapInterface, calls []mapCall) (results []mapResult, final map
 	return results, final
 }
 
-func applyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+func applyMap(calls []mapCall) ([]mapResult, map[any]any) {
 	return applyCalls(new(sync.Map), calls)
 }
 
-func applyRWMutexMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+func applyRWMutexMap(calls []mapCall) ([]mapResult, map[any]any) {
 	return applyCalls(new(RWMutexMap), calls)
 }
 
-func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[interface{}]interface{}) {
+func applyDeepCopyMap(calls []mapCall) ([]mapResult, map[any]any) {
 	return applyCalls(new(DeepCopyMap), calls)
 }
 
@@ -155,7 +155,7 @@ func TestConcurrentRange(t *testing.T) {
 
 	for n := iters; n > 0; n-- {
 		seen := make(map[int64]bool, mapSize)
-		m.Range(func(ki, vi interface{}) bool {
+		m.Range(func(ki, vi any) bool {
 			k, v := ki.(int64), vi.(int64)
 			if v%k != 0 {
 				t.Fatalf("while Storing multiples of %v, Range saw value %v", k, v)
@@ -195,3 +195,53 @@ func TestIssue40999(t *testing.T) {
 		runtime.GC()
 	}
 }
+
+func TestMapRangeNestedCall(t *testing.T) { // Issue 46399
+	var m sync.Map
+	for i, v := range [3]string{"hello", "world", "Go"} {
+		m.Store(i, v)
+	}
+	m.Range(func(key, value any) bool {
+		m.Range(func(key, value any) bool {
+			// We should be able to load the key offered in the Range callback,
+			// because there are no concurrent Delete involved in this tested map.
+			if v, ok := m.Load(key); !ok || !reflect.DeepEqual(v, value) {
+				t.Fatalf("Nested Range loads unexpected value, got %+v want %+v", v, value)
+			}
+
+			// We didn't keep 42 and a value into the map before, if somehow we loaded
+			// a value from such a key, meaning there must be an internal bug regarding
+			// nested range in the Map.
+			if _, loaded := m.LoadOrStore(42, "dummy"); loaded {
+				t.Fatalf("Nested Range loads unexpected value, want store a new value")
+			}
+
+			// Try to Store then LoadAndDelete the corresponding value with the key
+			// 42 to the Map. In this case, the key 42 and associated value should be
+			// removed from the Map. Therefore any future range won't observe key 42
+			// as we checked in above.
+			val := "sync.Map"
+			m.Store(42, val)
+			if v, loaded := m.LoadAndDelete(42); !loaded || !reflect.DeepEqual(v, val) {
+				t.Fatalf("Nested Range loads unexpected value, got %v, want %v", v, val)
+			}
+			return true
+		})
+
+		// Remove key from Map on-the-fly.
+		m.Delete(key)
+		return true
+	})
+
+	// After a Range of Delete, all keys should be removed and any
+	// further Range won't invoke the callback. Hence length remains 0.
+	length := 0
+	m.Range(func(key, value any) bool {
+		length++
+		return true
+	})
+
+	if length != 0 {
+		t.Fatalf("Unexpected sync.Map size, got %v want %v", length, 0)
+	}
+}
diff --git a/libgo/go/sync/mutex.go b/libgo/go/sync/mutex.go
index 3028552..18b2ced 100644
--- a/libgo/go/sync/mutex.go
+++ b/libgo/go/sync/mutex.go
@@ -81,6 +81,30 @@ func (m *Mutex) Lock() {
 	m.lockSlow()
 }
 
+// TryLock tries to lock m and reports whether it succeeded.
+//
+// Note that while correct uses of TryLock do exist, they are rare,
+// and use of TryLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (m *Mutex) TryLock() bool {
+	old := m.state
+	if old&(mutexLocked|mutexStarving) != 0 {
+		return false
+	}
+
+	// There may be a goroutine waiting for the mutex, but we are
+	// running now and can try to grab the mutex before that
+	// goroutine wakes up.
+	if !atomic.CompareAndSwapInt32(&m.state, old, old|mutexLocked) {
+		return false
+	}
+
+	if race.Enabled {
+		race.Acquire(unsafe.Pointer(m))
+	}
+	return true
+}
+
 func (m *Mutex) lockSlow() {
 	var waitStartTime int64
 	starving := false
diff --git a/libgo/go/sync/mutex_test.go b/libgo/go/sync/mutex_test.go
index 98c1bf2..cca0986 100644
--- a/libgo/go/sync/mutex_test.go
+++ b/libgo/go/sync/mutex_test.go
@@ -60,6 +60,12 @@ func BenchmarkContendedSemaphore(b *testing.B) {
 
 func HammerMutex(m *Mutex, loops int, cdone chan bool) {
 	for i := 0; i < loops; i++ {
+		if i%3 == 0 {
+			if m.TryLock() {
+				m.Unlock()
+			}
+			continue
+		}
 		m.Lock()
 		m.Unlock()
 	}
@@ -71,7 +77,19 @@ func TestMutex(t *testing.T) {
 		t.Logf("got mutexrate %d expected 0", n)
 	}
 	defer runtime.SetMutexProfileFraction(0)
+
 	m := new(Mutex)
+
+	m.Lock()
+	if m.TryLock() {
+		t.Fatalf("TryLock succeeded with mutex locked")
+	}
+	m.Unlock()
+	if !m.TryLock() {
+		t.Fatalf("TryLock failed with mutex unlocked")
+	}
+	m.Unlock()
+
 	c := make(chan bool)
 	for i := 0; i < 10; i++ {
 		go HammerMutex(m, 1000, c)
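Mutex.TryLock is new in Go 1.18. As its doc comment warns, it is rarely the right tool; a minimal sketch of its semantics (illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex

	if mu.TryLock() {
		fmt.Println("acquired without blocking")
		mu.Unlock()
	}

	mu.Lock()
	fmt.Println(mu.TryLock()) // false: already held
	mu.Unlock()
}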
diff --git a/libgo/go/sync/pool.go b/libgo/go/sync/pool.go
index a892fa7..5add722 100644
--- a/libgo/go/sync/pool.go
+++ b/libgo/go/sync/pool.go
@@ -53,13 +53,13 @@ type Pool struct {
 	// New optionally specifies a function to generate
 	// a value when Get would otherwise return nil.
 	// It may not be changed concurrently with calls to Get.
-	New func() interface{}
+	New func() any
 }
 
 // Local per-P Pool appendix.
 type poolLocalInternal struct {
-	private interface{} // Can be used only by the respective P.
-	shared  poolChain   // Local P can pushHead/popHead; any P can popTail.
+	private any       // Can be used only by the respective P.
+	shared  poolChain // Local P can pushHead/popHead; any P can popTail.
 }
 
 type poolLocal struct {
@@ -71,7 +71,7 @@ type poolLocal struct {
 }
 
 // from runtime
-func fastrand() uint32
+func fastrandn(n uint32) uint32
 
 var poolRaceHash [128]uint64
 
@@ -80,19 +80,19 @@ var poolRaceHash [128]uint64
 // directly, for fear of conflicting with other synchronization on that address.
 // Instead, we hash the pointer to get an index into poolRaceHash.
 // See discussion on golang.org/cl/31589.
-func poolRaceAddr(x interface{}) unsafe.Pointer {
+func poolRaceAddr(x any) unsafe.Pointer {
 	ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
 	h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
 	return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
 }
 
 // Put adds x to the pool.
-func (p *Pool) Put(x interface{}) {
+func (p *Pool) Put(x any) {
 	if x == nil {
 		return
 	}
 	if race.Enabled {
-		if fastrand()%4 == 0 {
+		if fastrandn(4) == 0 {
 			// Randomly drop x on floor.
 			return
 		}
@@ -121,7 +121,7 @@ func (p *Pool) Put(x interface{}) {
 //
 // If Get would otherwise return nil and p.New is non-nil, Get returns
 // the result of calling p.New.
-func (p *Pool) Get() interface{} {
+func (p *Pool) Get() any {
 	if race.Enabled {
 		race.Disable()
 	}
@@ -150,7 +150,7 @@ func (p *Pool) Get() interface{} {
 	return x
 }
 
-func (p *Pool) getSlow(pid int) interface{} {
+func (p *Pool) getSlow(pid int) any {
 	// See the comment in pin regarding ordering of the loads.
 	size := runtime_LoadAcquintptr(&p.localSize) // load-acquire
 	locals := p.local                            // load-consume
diff --git a/libgo/go/sync/pool_test.go b/libgo/go/sync/pool_test.go
index 9371644..506c367 100644
--- a/libgo/go/sync/pool_test.go
+++ b/libgo/go/sync/pool_test.go
@@ -4,7 +4,6 @@
 
 // Pool is no-op under race detector, so all these tests do not work.
 //go:build !race
-// +build !race
 
 package sync_test
 
@@ -65,7 +64,7 @@ func TestPoolNew(t *testing.T) {
 
 	i := 0
 	p := Pool{
-		New: func() interface{} {
+		New: func() any {
 			i++
 			return i
 		},
@@ -145,7 +144,7 @@ func TestPoolStress(t *testing.T) {
 	done := make(chan bool)
 	for i := 0; i < P; i++ {
 		go func() {
-			var v interface{} = 0
+			var v any = 0
 			for j := 0; j < N; j++ {
 				if v == nil {
 					v = 0
@@ -272,7 +271,27 @@ func BenchmarkPoolOverflow(b *testing.B) {
 	})
 }
 
-var globalSink interface{}
+// Simulate object starvation in order to force Ps to steal objects
+// from other Ps.
+func BenchmarkPoolStarvation(b *testing.B) {
+	var p Pool
+	count := 100
+	// Reduce number of putted objects by 33 %. It creates objects starvation
+	// that force P-local storage to steal objects from other Ps.
+	countStarved := count - int(float32(count)*0.33)
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			for b := 0; b < countStarved; b++ {
+				p.Put(1)
+			}
+			for b := 0; b < count; b++ {
+				p.Get()
+			}
+		}
+	})
+}
+
+var globalSink any
 
 func BenchmarkPoolSTW(b *testing.B) {
 	// Take control of GC.
@@ -285,7 +304,7 @@ func BenchmarkPoolSTW(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		// Put a large number of items into a pool.
 		const N = 100000
-		var item interface{} = 42
+		var item any = 42
 		for i := 0; i < N; i++ {
 			p.Put(item)
 		}
@@ -320,7 +339,7 @@ func BenchmarkPoolExpensiveNew(b *testing.B) {
 	// Create a pool that's "expensive" to fill.
 	var p Pool
 	var nNew uint64
-	p.New = func() interface{} {
+	p.New = func() any {
 		atomic.AddUint64(&nNew, 1)
 		time.Sleep(time.Millisecond)
 		return 42
@@ -330,7 +349,7 @@ func BenchmarkPoolExpensiveNew(b *testing.B) {
 	b.RunParallel(func(pb *testing.PB) {
 		// Simulate 100X the number of goroutines having items
 		// checked out from the Pool simultaneously.
-		items := make([]interface{}, 100)
+		items := make([]any, 100)
 		var sink []byte
 		for pb.Next() {
 			// Stress the pool.
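The pool changes are mechanical (interface{} becomes any, fastrand becomes fastrandn); the usage pattern from example_pool_test.go still applies. A condensed sketch, following the guidance there that New should return a pointer type so Put and Get avoid an extra allocation when boxing into the interface:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	// A pointer type avoids an allocation when the value is
	// stored into the interface returned by Get.
	New: func() any { return new(bytes.Buffer) },
}

func main() {
	b := bufPool.Get().(*bytes.Buffer)
	b.Reset() // a pooled buffer may hold old contents
	b.WriteString("hello")
	fmt.Println(b.String())
	bufPool.Put(b)
}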
diff --git a/libgo/go/sync/poolqueue.go b/libgo/go/sync/poolqueue.go
index 9be83e9..631f2c1 100644
--- a/libgo/go/sync/poolqueue.go
+++ b/libgo/go/sync/poolqueue.go
@@ -77,7 +77,7 @@ func (d *poolDequeue) pack(head, tail uint32) uint64 {
 
 // pushHead adds val at the head of the queue. It returns false if the
 // queue is full. It must only be called by a single producer.
-func (d *poolDequeue) pushHead(val interface{}) bool {
+func (d *poolDequeue) pushHead(val any) bool {
 	ptrs := atomic.LoadUint64(&d.headTail)
 	head, tail := d.unpack(ptrs)
 	if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
@@ -98,7 +98,7 @@ func (d *poolDequeue) pushHead(val interface{}) bool {
 	if val == nil {
 		val = dequeueNil(nil)
 	}
-	*(*interface{})(unsafe.Pointer(slot)) = val
+	*(*any)(unsafe.Pointer(slot)) = val
 
 	// Increment head. This passes ownership of slot to popTail
 	// and acts as a store barrier for writing the slot.
@@ -109,7 +109,7 @@ func (d *poolDequeue) pushHead(val interface{}) bool {
 // popHead removes and returns the element at the head of the queue.
 // It returns false if the queue is empty. It must only be called by a
 // single producer.
-func (d *poolDequeue) popHead() (interface{}, bool) {
+func (d *poolDequeue) popHead() (any, bool) {
 	var slot *eface
 	for {
 		ptrs := atomic.LoadUint64(&d.headTail)
@@ -131,7 +131,7 @@ func (d *poolDequeue) popHead() (interface{}, bool) {
 		}
 	}
 
-	val := *(*interface{})(unsafe.Pointer(slot))
+	val := *(*any)(unsafe.Pointer(slot))
 	if val == dequeueNil(nil) {
 		val = nil
 	}
@@ -144,7 +144,7 @@ func (d *poolDequeue) popHead() (interface{}, bool) {
 // popTail removes and returns the element at the tail of the queue.
 // It returns false if the queue is empty. It may be called by any
 // number of consumers.
-func (d *poolDequeue) popTail() (interface{}, bool) {
+func (d *poolDequeue) popTail() (any, bool) {
 	var slot *eface
 	for {
 		ptrs := atomic.LoadUint64(&d.headTail)
@@ -166,7 +166,7 @@ func (d *poolDequeue) popTail() (interface{}, bool) {
 	}
 
 	// We now own slot.
-	val := *(*interface{})(unsafe.Pointer(slot))
+	val := *(*any)(unsafe.Pointer(slot))
 	if val == dequeueNil(nil) {
 		val = nil
 	}
@@ -225,7 +225,7 @@ func loadPoolChainElt(pp **poolChainElt) *poolChainElt {
 	return (*poolChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
 }
 
-func (c *poolChain) pushHead(val interface{}) {
+func (c *poolChain) pushHead(val any) {
 	d := c.head
 	if d == nil {
 		// Initialize the chain.
@@ -255,7 +255,7 @@ func (c *poolChain) pushHead(val interface{}) {
 	d2.pushHead(val)
 }
 
-func (c *poolChain) popHead() (interface{}, bool) {
+func (c *poolChain) popHead() (any, bool) {
 	d := c.head
 	for d != nil {
 		if val, ok := d.popHead(); ok {
@@ -268,7 +268,7 @@ func (c *poolChain) popHead() (interface{}, bool) {
 	return nil, false
 }
 
-func (c *poolChain) popTail() (interface{}, bool) {
+func (c *poolChain) popTail() (any, bool) {
 	d := loadPoolChainElt(&c.tail)
 	if d == nil {
 		return nil, false
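poolDequeue packs its head and tail indices into the single uint64 headTail word loaded atomically above, so both can be read or compare-and-swapped in one operation. A standalone sketch of that packing scheme, mirroring the pack/unpack functions referenced in the hunk headers:

package main

import "fmt"

const dequeueBits = 32

// pack stores head in the high 32 bits and tail in the low 32 bits.
func pack(head, tail uint32) uint64 {
	const mask = 1<<dequeueBits - 1
	return uint64(head)<<dequeueBits | uint64(tail&mask)
}

// unpack recovers both indices from a single atomic load.
func unpack(ptrs uint64) (head, tail uint32) {
	const mask = 1<<dequeueBits - 1
	head = uint32(ptrs >> dequeueBits & mask)
	tail = uint32(ptrs & mask)
	return
}

func main() {
	h, t := unpack(pack(7, 3))
	fmt.Println(h, t) // 7 3
}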
diff --git a/libgo/go/sync/runtime2.go b/libgo/go/sync/runtime2.go
index c4b4489..9b7e992 100644
--- a/libgo/go/sync/runtime2.go
+++ b/libgo/go/sync/runtime2.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build !goexperiment.staticlockranking
-// +build !goexperiment.staticlockranking
 
 package sync
 
diff --git a/libgo/go/sync/runtime2_lockrank.go b/libgo/go/sync/runtime2_lockrank.go
index e91fdb6..cdb1af4 100644
--- a/libgo/go/sync/runtime2_lockrank.go
+++ b/libgo/go/sync/runtime2_lockrank.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 
 //go:build goexperiment.staticlockranking
-// +build goexperiment.staticlockranking
 
 package sync
 
diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go
index 3012b55..f0d4c97 100644
--- a/libgo/go/sync/rwmutex.go
+++ b/libgo/go/sync/rwmutex.go
@@ -68,6 +68,34 @@ func (rw *RWMutex) RLock() {
 	}
 }
 
+// TryRLock tries to lock rw for reading and reports whether it succeeded.
+//
+// Note that while correct uses of TryRLock do exist, they are rare,
+// and use of TryRLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (rw *RWMutex) TryRLock() bool {
+	if race.Enabled {
+		_ = rw.w.state
+		race.Disable()
+	}
+	for {
+		c := atomic.LoadInt32(&rw.readerCount)
+		if c < 0 {
+			if race.Enabled {
+				race.Enable()
+			}
+			return false
+		}
+		if atomic.CompareAndSwapInt32(&rw.readerCount, c, c+1) {
+			if race.Enabled {
+				race.Enable()
+				race.Acquire(unsafe.Pointer(&rw.readerSem))
+			}
+			return true
+		}
+	}
+}
+
 // RUnlock undoes a single RLock call;
 // it does not affect other simultaneous readers.
 // It is a run-time error if rw is not locked for reading
@@ -122,6 +150,37 @@ func (rw *RWMutex) Lock() {
 	}
 }
 
+// TryLock tries to lock rw for writing and reports whether it succeeded.
+//
+// Note that while correct uses of TryLock do exist, they are rare,
+// and use of TryLock is often a sign of a deeper problem
+// in a particular use of mutexes.
+func (rw *RWMutex) TryLock() bool {
+	if race.Enabled {
+		_ = rw.w.state
+		race.Disable()
+	}
+	if !rw.w.TryLock() {
+		if race.Enabled {
+			race.Enable()
+		}
+		return false
+	}
+	if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
+		rw.w.Unlock()
+		if race.Enabled {
+			race.Enable()
+		}
+		return false
+	}
+	if race.Enabled {
+		race.Enable()
+		race.Acquire(unsafe.Pointer(&rw.readerSem))
+		race.Acquire(unsafe.Pointer(&rw.writerSem))
+	}
+	return true
+}
+
 // Unlock unlocks rw for writing. It is a run-time error if rw is
 // not locked for writing on entry to Unlock.
 //
diff --git a/libgo/go/sync/rwmutex_test.go b/libgo/go/sync/rwmutex_test.go
index c98e69f..dfbdd9b 100644
--- a/libgo/go/sync/rwmutex_test.go
+++ b/libgo/go/sync/rwmutex_test.go
@@ -108,6 +108,34 @@ func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
 }
 
 func TestRWMutex(t *testing.T) {
+	var m RWMutex
+
+	m.Lock()
+	if m.TryLock() {
+		t.Fatalf("TryLock succeeded with mutex locked")
+	}
+	if m.TryRLock() {
+		t.Fatalf("TryRLock succeeded with mutex locked")
+	}
+	m.Unlock()
+
+	if !m.TryLock() {
+		t.Fatalf("TryLock failed with mutex unlocked")
+	}
+	m.Unlock()
+
+	if !m.TryRLock() {
+		t.Fatalf("TryRLock failed with mutex unlocked")
+	}
+	if !m.TryRLock() {
+		t.Fatalf("TryRLock failed with mutex rlocked")
+	}
+	if m.TryLock() {
+		t.Fatalf("TryLock succeeded with mutex rlocked")
+	}
+	m.RUnlock()
+	m.RUnlock()
+
 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
 	n := 1000
 	if testing.Short() {
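RWMutex gains the corresponding non-blocking acquisitions in the same release. A minimal sketch of how TryLock and TryRLock interact with an existing read lock (illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var rw sync.RWMutex

	rw.RLock()
	fmt.Println(rw.TryRLock()) // true: readers may share the lock
	fmt.Println(rw.TryLock())  // false: a reader is active
	rw.RUnlock()               // one RUnlock per successful (Try)RLock
	rw.RUnlock()
}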
diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go
index e81a493..9c6662d 100644
--- a/libgo/go/sync/waitgroup.go
+++ b/libgo/go/sync/waitgroup.go
@@ -22,18 +22,24 @@ type WaitGroup struct {
 
 	// 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
 	// 64-bit atomic operations require 64-bit alignment, but 32-bit
-	// compilers do not ensure it. So we allocate 12 bytes and then use
-	// the aligned 8 bytes in them as state, and the other 4 as storage
-	// for the sema.
-	state1 [3]uint32
+	// compilers only guarantee that 64-bit fields are 32-bit aligned.
+	// For this reason on 32 bit architectures we need to check in state()
+	// if state1 is aligned or not, and dynamically "swap" the field order if
+	// needed.
+	state1 uint64
+	state2 uint32
 }
 
-// state returns pointers to the state and sema fields stored within wg.state1.
+// state returns pointers to the state and sema fields stored within wg.state*.
 func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
-	if uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
-		return (*uint64)(unsafe.Pointer(&wg.state1)), &wg.state1[2]
+	if unsafe.Alignof(wg.state1) == 8 || uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
+		// state1 is 64-bit aligned: nothing to do.
+		return &wg.state1, &wg.state2
 	} else {
-		return (*uint64)(unsafe.Pointer(&wg.state1[1])), &wg.state1[0]
+		// state1 is 32-bit aligned but not 64-bit aligned: this means that
+		// (&state1)+4 is 64-bit aligned.
+		state := (*[3]uint32)(unsafe.Pointer(&wg.state1))
+		return (*uint64)(unsafe.Pointer(&state[1])), &state[0]
 	}
 }
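The old representation allocated 12 bytes ([3]uint32) and picked whichever 8 of them were 64-bit aligned at run time; the new layout keeps that fallback only for 32-bit targets where state1 itself may be merely 4-byte aligned. A standalone sketch of the old trick, for illustration only (the real code operates on WaitGroup's own fields):

package main

import (
	"fmt"
	"unsafe"
)

// alignedState picks the 64-bit-aligned 8 bytes out of 12 for the
// atomic state word, leaving the remaining 4 bytes for the semaphore.
func alignedState(state1 *[3]uint32) (statep *uint64, semap *uint32) {
	if uintptr(unsafe.Pointer(state1))%8 == 0 {
		return (*uint64)(unsafe.Pointer(&state1[0])), &state1[2]
	}
	// Only 4-byte aligned: the middle two words are then 8-byte aligned.
	return (*uint64)(unsafe.Pointer(&state1[1])), &state1[0]
}

func main() {
	var s [3]uint32
	statep, _ := alignedState(&s)
	// The returned pointer is always safe for 64-bit atomics.
	fmt.Println(uintptr(unsafe.Pointer(statep))%8 == 0) // true
}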
diff --git a/libgo/go/sync/waitgroup_test.go b/libgo/go/sync/waitgroup_test.go
index c569e0f..4ded218 100644
--- a/libgo/go/sync/waitgroup_test.go
+++ b/libgo/go/sync/waitgroup_test.go
@@ -5,8 +5,6 @@
 package sync_test
 
 import (
-	"internal/race"
-	"runtime"
 	. "sync"
 	"sync/atomic"
 	"testing"
@@ -48,12 +46,6 @@ func TestWaitGroup(t *testing.T) {
 	}
 }
 
-func knownRacy(t *testing.T) {
-	if race.Enabled {
-		t.Skip("skipping known-racy test under the race detector")
-	}
-}
-
 func TestWaitGroupMisuse(t *testing.T) {
 	defer func() {
 		err := recover()
@@ -68,124 +60,6 @@ func TestWaitGroupMisuse(t *testing.T) {
 	t.Fatal("Should panic")
 }
 
-// pollUntilEqual blocks until v, loaded atomically, is
-// equal to the target.
-func pollUntilEqual(v *uint32, target uint32) {
-	for {
-		for i := 0; i < 1e3; i++ {
-			if atomic.LoadUint32(v) == target {
-				return
-			}
-		}
-		// yield to avoid deadlock with the garbage collector
-		// see issue #20072
-		runtime.Gosched()
-	}
-}
-
-func TestWaitGroupMisuse2(t *testing.T) {
-	knownRacy(t)
-	if runtime.NumCPU() <= 4 {
-		t.Skip("NumCPU<=4, skipping: this test requires parallelism")
-	}
-	defer func() {
-		err := recover()
-		if err != "sync: negative WaitGroup counter" &&
-			err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
-			err != "sync: WaitGroup is reused before previous Wait has returned" {
-			t.Fatalf("Unexpected panic: %#v", err)
-		}
-	}()
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
-	done := make(chan interface{}, 2)
-	// The detection is opportunistic, so we want it to panic
-	// at least in one run out of a million.
-	for i := 0; i < 1e6; i++ {
-		var wg WaitGroup
-		var here uint32
-		wg.Add(1)
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			atomic.AddUint32(&here, 1)
-			pollUntilEqual(&here, 3)
-			wg.Wait()
-		}()
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			atomic.AddUint32(&here, 1)
-			pollUntilEqual(&here, 3)
-			wg.Add(1) // This is the bad guy.
-			wg.Done()
-		}()
-		atomic.AddUint32(&here, 1)
-		pollUntilEqual(&here, 3)
-		wg.Done()
-		for j := 0; j < 2; j++ {
-			if err := <-done; err != nil {
-				panic(err)
-			}
-		}
-	}
-	t.Fatal("Should panic")
-}
-
-func TestWaitGroupMisuse3(t *testing.T) {
-	knownRacy(t)
-	if runtime.NumCPU() <= 1 {
-		t.Skip("NumCPU==1, skipping: this test requires parallelism")
-	}
-	defer func() {
-		err := recover()
-		if err != "sync: negative WaitGroup counter" &&
-			err != "sync: WaitGroup misuse: Add called concurrently with Wait" &&
-			err != "sync: WaitGroup is reused before previous Wait has returned" {
-			t.Fatalf("Unexpected panic: %#v", err)
-		}
-	}()
-	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
-	done := make(chan interface{}, 3)
-	// The detection is opportunistically, so we want it to panic
-	// at least in one run out of a million.
-	for i := 0; i < 1e6; i++ {
-		var wg WaitGroup
-		wg.Add(1)
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			wg.Done()
-		}()
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			wg.Wait()
-			// Start reusing the wg before waiting for the Wait below to return.
-			wg.Add(1)
-			go func() {
-				wg.Done()
-			}()
-			wg.Wait()
-		}()
-		go func() {
-			defer func() {
-				done <- recover()
-			}()
-			wg.Wait()
-		}()
-		for j := 0; j < 3; j++ {
-			if err := <-done; err != nil {
-				panic(err)
-			}
-		}
-	}
-	t.Fatal("Should panic")
-}
-
 func TestWaitGroupRace(t *testing.T) {
 	// Run this test for about 1ms.
 	for i := 0; i < 1000; i++ {
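The deleted TestWaitGroupMisuse2/3 stress tests probed the misuse panics opportunistically over a million iterations; the panics themselves remain in the package. For reference, a sketch of the canonical safe pattern they guard (illustrative code, not from the patch):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // Add before the goroutine starts, never inside it.
		go func(n int) {
			defer wg.Done()
			fmt.Println("worker", n)
		}(i)
	}
	wg.Wait()
}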