author    Ian Lance Taylor <ian@gcc.gnu.org>  2013-11-06 19:49:01 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2013-11-06 19:49:01 +0000
commit    f038dae646bac2b31be98ab592c0e5206d2d96f5
tree      39530b071991b2326f881b2a30a2d82d6c133fd6 /libgo/go/sync
parent    f20f261304993444741e0f0a14d3147e591bc660
libgo: Update to October 24 version of master library.
From-SVN: r204466
Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/atomic/64bit_arm.go     10
-rw-r--r--  libgo/go/sync/atomic/atomic.c        129
-rw-r--r--  libgo/go/sync/atomic/atomic_test.go  397
-rw-r--r--  libgo/go/sync/atomic/doc.go           31
-rw-r--r--  libgo/go/sync/atomic/race.go          48
-rw-r--r--  libgo/go/sync/cond.go                113
-rw-r--r--  libgo/go/sync/cond_test.go           129
-rw-r--r--  libgo/go/sync/example_test.go          7
-rw-r--r--  libgo/go/sync/once.go                  4
-rw-r--r--  libgo/go/sync/race.go                  8
-rw-r--r--  libgo/go/sync/race0.go                 6
-rw-r--r--  libgo/go/sync/runtime.go              18
-rw-r--r--  libgo/go/sync/waitgroup.go            26
13 files changed, 784 insertions, 142 deletions
diff --git a/libgo/go/sync/atomic/64bit_arm.go b/libgo/go/sync/atomic/64bit_arm.go
index f070e78..c08f214 100644
--- a/libgo/go/sync/atomic/64bit_arm.go
+++ b/libgo/go/sync/atomic/64bit_arm.go
@@ -34,3 +34,13 @@ func addUint64(val *uint64, delta uint64) (new uint64) {
 	}
 	return
 }
+
+func swapUint64(addr *uint64, new uint64) (old uint64) {
+	for {
+		old = *addr
+		if CompareAndSwapUint64(addr, old, new) {
+			break
+		}
+	}
+	return
+}
diff --git a/libgo/go/sync/atomic/atomic.c b/libgo/go/sync/atomic/atomic.c
index 32430df..f0ba57b 100644
--- a/libgo/go/sync/atomic/atomic.c
+++ b/libgo/go/sync/atomic/atomic.c
@@ -8,8 +8,69 @@
 
 #include "runtime.h"
 
+int32_t SwapInt32 (int32_t *, int32_t)
+  __asm__ (GOSYM_PREFIX "sync_atomic.SwapInt32")
+  __attribute__ ((no_split_stack));
+
+int32_t
+SwapInt32 (int32_t *addr, int32_t new)
+{
+  return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
+}
+
+int64_t SwapInt64 (int64_t *, int64_t)
+  __asm__ (GOSYM_PREFIX "sync_atomic.SwapInt64")
+  __attribute__ ((no_split_stack));
+
+int64_t
+SwapInt64 (int64_t *addr, int64_t new)
+{
+  return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
+}
+
+uint32_t SwapUint32 (uint32_t *, uint32_t)
+  __asm__ (GOSYM_PREFIX "sync_atomic.SwapUint32")
+  __attribute__ ((no_split_stack));
+
+uint32_t
+SwapUint32 (uint32_t *addr, uint32_t new)
+{
+  return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
+}
+
+uint64_t SwapUint64 (uint64_t *, uint64_t)
+  __asm__ (GOSYM_PREFIX "sync_atomic.SwapUint64")
+  __attribute__ ((no_split_stack));
+
+uint64_t
+SwapUint64 (uint64_t *addr, uint64_t new)
+{
+  return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
+}
+
+uintptr_t SwapUintptr (uintptr_t *, uintptr_t)
+  __asm__ (GOSYM_PREFIX "sync_atomic.SwapUintptr")
+  __attribute__ ((no_split_stack));
+
+uintptr_t
+SwapUintptr (uintptr_t *addr, uintptr_t new)
+{
+  return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
+}
+
+void *SwapPointer (void **, void *)
+  __asm__ (GOSYM_PREFIX "sync_atomic.SwapPointer")
+  __attribute__ ((no_split_stack));
+
+void *
+SwapPointer (void **addr, void *new)
+{
+  return __atomic_exchange_n (addr, new, __ATOMIC_SEQ_CST);
+}
+
 _Bool CompareAndSwapInt32 (int32_t *, int32_t, int32_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapInt32");
+  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapInt32")
+  __attribute__ ((no_split_stack));
 
 _Bool
 CompareAndSwapInt32 (int32_t *val, int32_t old, int32_t new)
@@ -18,7 +79,8 @@ CompareAndSwapInt32 (int32_t *val, int32_t old, int32_t new)
 }
 
 _Bool CompareAndSwapInt64 (int64_t *, int64_t, int64_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapInt64");
+  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapInt64")
+  __attribute__ ((no_split_stack));
 
 _Bool
 CompareAndSwapInt64 (int64_t *val, int64_t old, int64_t new)
@@ -27,7 +89,8 @@ CompareAndSwapInt64 (int64_t *val, int64_t old, int64_t new)
 }
 
 _Bool CompareAndSwapUint32 (uint32_t *, uint32_t, uint32_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUint32");
+  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUint32")
+  __attribute__ ((no_split_stack));
 
 _Bool
 CompareAndSwapUint32 (uint32_t *val, uint32_t old, uint32_t new)
@@ -36,7 +99,8 @@ CompareAndSwapUint32 (uint32_t *val, uint32_t old, uint32_t new)
 }
 
 _Bool CompareAndSwapUint64 (uint64_t *, uint64_t, uint64_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUint64");
+  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUint64")
+  __attribute__ ((no_split_stack));
 
 _Bool
 CompareAndSwapUint64 (uint64_t *val, uint64_t old, uint64_t new)
@@ -45,7 +109,8 @@ CompareAndSwapUint64 (uint64_t *val, uint64_t old, uint64_t new)
 }
 
 _Bool CompareAndSwapUintptr (uintptr_t *, uintptr_t, uintptr_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUintptr");
+  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapUintptr")
+  __attribute__ ((no_split_stack));
 
 _Bool
 CompareAndSwapUintptr (uintptr_t *val, uintptr_t old, uintptr_t new)
@@ -54,7 +119,8 @@ CompareAndSwapUintptr (uintptr_t *val, uintptr_t old, uintptr_t new)
 }
 
 _Bool CompareAndSwapPointer (void **, void *, void *)
-  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapPointer");
+  __asm__ (GOSYM_PREFIX "sync_atomic.CompareAndSwapPointer")
+  __attribute__ ((no_split_stack));
 
 _Bool
 CompareAndSwapPointer (void **val, void *old, void *new)
@@ -63,7 +129,8 @@ CompareAndSwapPointer (void **val, void *old, void *new)
 }
 
 int32_t AddInt32 (int32_t *, int32_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.AddInt32");
+  __asm__ (GOSYM_PREFIX "sync_atomic.AddInt32")
+  __attribute__ ((no_split_stack));
 
 int32_t
 AddInt32 (int32_t *val, int32_t delta)
@@ -72,7 +139,8 @@ AddInt32 (int32_t *val, int32_t delta)
 }
 
 uint32_t AddUint32 (uint32_t *, uint32_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.AddUint32");
+  __asm__ (GOSYM_PREFIX "sync_atomic.AddUint32")
+  __attribute__ ((no_split_stack));
 
 uint32_t
 AddUint32 (uint32_t *val, uint32_t delta)
@@ -81,7 +149,8 @@ AddUint32 (uint32_t *val, uint32_t delta)
 }
 
 int64_t AddInt64 (int64_t *, int64_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.AddInt64");
+  __asm__ (GOSYM_PREFIX "sync_atomic.AddInt64")
+  __attribute__ ((no_split_stack));
 
 int64_t
 AddInt64 (int64_t *val, int64_t delta)
@@ -90,7 +159,8 @@ AddInt64 (int64_t *val, int64_t delta)
 }
 
 uint64_t AddUint64 (uint64_t *, uint64_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.AddUint64");
+  __asm__ (GOSYM_PREFIX "sync_atomic.AddUint64")
+  __attribute__ ((no_split_stack));
 
 uint64_t
 AddUint64 (uint64_t *val, uint64_t delta)
@@ -99,7 +169,8 @@ AddUint64 (uint64_t *val, uint64_t delta)
 }
 
 uintptr_t AddUintptr (uintptr_t *, uintptr_t)
-  __asm__ (GOSYM_PREFIX "sync_atomic.AddUintptr");
+  __asm__ (GOSYM_PREFIX "sync_atomic.AddUintptr")
+  __attribute__ ((no_split_stack));
 
 uintptr_t
 AddUintptr (uintptr_t *val, uintptr_t delta)
@@ -108,7 +179,8 @@ AddUintptr (uintptr_t *val, uintptr_t delta)
 }
 
 int32_t LoadInt32 (int32_t *addr)
-  __asm__ (GOSYM_PREFIX "sync_atomic.LoadInt32");
+  __asm__ (GOSYM_PREFIX "sync_atomic.LoadInt32")
+  __attribute__ ((no_split_stack));
 
 int32_t
 LoadInt32 (int32_t *addr)
@@ -122,7 +194,8 @@ LoadInt32 (int32_t *addr)
 }
 
 int64_t LoadInt64 (int64_t *addr)
-  __asm__ (GOSYM_PREFIX "sync_atomic.LoadInt64");
+  __asm__ (GOSYM_PREFIX "sync_atomic.LoadInt64")
+  __attribute__ ((no_split_stack));
 
 int64_t
 LoadInt64 (int64_t *addr)
@@ -136,7 +209,8 @@ LoadInt64 (int64_t *addr)
 }
 
 uint32_t LoadUint32 (uint32_t *addr)
-  __asm__ (GOSYM_PREFIX "sync_atomic.LoadUint32");
+  __asm__ (GOSYM_PREFIX "sync_atomic.LoadUint32")
+  __attribute__ ((no_split_stack));
 
 uint32_t
 LoadUint32 (uint32_t *addr)
@@ -150,7 +224,8 @@ LoadUint32 (uint32_t *addr)
 }
 
 uint64_t LoadUint64 (uint64_t *addr)
-  __asm__ (GOSYM_PREFIX "sync_atomic.LoadUint64");
+  __asm__ (GOSYM_PREFIX "sync_atomic.LoadUint64")
+  __attribute__ ((no_split_stack));
 
 uint64_t
 LoadUint64 (uint64_t *addr)
@@ -164,7 +239,8 @@ LoadUint64 (uint64_t *addr)
 }
 
 uintptr_t LoadUintptr (uintptr_t *addr)
-  __asm__ (GOSYM_PREFIX "sync_atomic.LoadUintptr");
+  __asm__ (GOSYM_PREFIX "sync_atomic.LoadUintptr")
+  __attribute__ ((no_split_stack));
 
 uintptr_t
 LoadUintptr (uintptr_t *addr)
@@ -178,7 +254,8 @@ LoadUintptr (uintptr_t *addr)
 }
 
 void *LoadPointer (void **addr)
-  __asm__ (GOSYM_PREFIX "sync_atomic.LoadPointer");
+  __asm__ (GOSYM_PREFIX "sync_atomic.LoadPointer")
+  __attribute__ ((no_split_stack));
 
 void *
 LoadPointer (void **addr)
@@ -192,7 +269,8 @@ LoadPointer (void **addr)
 }
 
 void StoreInt32 (int32_t *addr, int32_t val)
-  __asm__ (GOSYM_PREFIX "sync_atomic.StoreInt32");
+  __asm__ (GOSYM_PREFIX "sync_atomic.StoreInt32")
+  __attribute__ ((no_split_stack));
 
 void
 StoreInt32 (int32_t *addr, int32_t val)
@@ -205,7 +283,8 @@ StoreInt32 (int32_t *addr, int32_t val)
 }
 
 void StoreInt64 (int64_t *addr, int64_t val)
-  __asm__ (GOSYM_PREFIX "sync_atomic.StoreInt64");
+  __asm__ (GOSYM_PREFIX "sync_atomic.StoreInt64")
+  __attribute__ ((no_split_stack));
 
 void
 StoreInt64 (int64_t *addr, int64_t val)
@@ -218,7 +297,8 @@ StoreInt64 (int64_t *addr, int64_t val)
 }
 
 void StoreUint32 (uint32_t *addr, uint32_t val)
-  __asm__ (GOSYM_PREFIX "sync_atomic.StoreUint32");
+  __asm__ (GOSYM_PREFIX "sync_atomic.StoreUint32")
+  __attribute__ ((no_split_stack));
 
 void
 StoreUint32 (uint32_t *addr, uint32_t val)
@@ -231,7 +311,8 @@ StoreUint32 (uint32_t *addr, uint32_t val)
 }
 
 void StoreUint64 (uint64_t *addr, uint64_t val)
-  __asm__ (GOSYM_PREFIX "sync_atomic.StoreUint64");
+  __asm__ (GOSYM_PREFIX "sync_atomic.StoreUint64")
+  __attribute__ ((no_split_stack));
 
 void
 StoreUint64 (uint64_t *addr, uint64_t val)
@@ -244,7 +325,8 @@ StoreUint64 (uint64_t *addr, uint64_t val)
 }
 
 void StoreUintptr (uintptr_t *addr, uintptr_t val)
-  __asm__ (GOSYM_PREFIX "sync_atomic.StoreUintptr");
+  __asm__ (GOSYM_PREFIX "sync_atomic.StoreUintptr")
+  __attribute__ ((no_split_stack));
 
 void
 StoreUintptr (uintptr_t *addr, uintptr_t val)
@@ -257,7 +339,8 @@ StoreUintptr (uintptr_t *addr, uintptr_t val)
 }
 
 void StorePointer (void **addr, void *val)
-  __asm__ (GOSYM_PREFIX "sync_atomic.StorePointer");
+  __asm__ (GOSYM_PREFIX "sync_atomic.StorePointer")
+  __attribute__ ((no_split_stack));
 
 void
 StorePointer (void **addr, void *val)
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
index c6c33dc..06dd5f7 100644
--- a/libgo/go/sync/atomic/atomic_test.go
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -5,7 +5,9 @@
 package atomic_test
 
 import (
+	"fmt"
 	"runtime"
+	"strings"
 	. "sync/atomic"
 	"testing"
 	"unsafe"
@@ -38,6 +40,142 @@ var test64err = func() (err interface{}) {
 	return nil
 }()
 
+func TestSwapInt32(t *testing.T) {
+	var x struct {
+		before int32
+		i      int32
+		after  int32
+	}
+	x.before = magic32
+	x.after = magic32
+	var j int32
+	for delta := int32(1); delta+delta > delta; delta += delta {
+		k := SwapInt32(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestSwapUint32(t *testing.T) {
+	var x struct {
+		before uint32
+		i      uint32
+		after  uint32
+	}
+	x.before = magic32
+	x.after = magic32
+	var j uint32
+	for delta := uint32(1); delta+delta > delta; delta += delta {
+		k := SwapUint32(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magic32 || x.after != magic32 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+	}
+}
+
+func TestSwapInt64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before int64
+		i      int64
+		after  int64
+	}
+	x.before = magic64
+	x.after = magic64
+	var j int64
+	for delta := int64(1); delta+delta > delta; delta += delta {
+		k := SwapInt64(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestSwapUint64(t *testing.T) {
+	if test64err != nil {
+		t.Skipf("Skipping 64-bit tests: %v", test64err)
+	}
+	var x struct {
+		before uint64
+		i      uint64
+		after  uint64
+	}
+	x.before = magic64
+	x.after = magic64
+	var j uint64
+	for delta := uint64(1); delta+delta > delta; delta += delta {
+		k := SwapUint64(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magic64 || x.after != magic64 {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+	}
+}
+
+func TestSwapUintptr(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      uintptr
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	var j uintptr
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		k := SwapUintptr(&x.i, delta)
+		if x.i != delta || k != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
+func TestSwapPointer(t *testing.T) {
+	var x struct {
+		before uintptr
+		i      unsafe.Pointer
+		after  uintptr
+	}
+	var m uint64 = magic64
+	magicptr := uintptr(m)
+	x.before = magicptr
+	x.after = magicptr
+	var j uintptr
+	for delta := uintptr(1); delta+delta > delta; delta += delta {
+		k := SwapPointer(&x.i, unsafe.Pointer(delta))
+		if uintptr(x.i) != delta || uintptr(k) != j {
+			t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+		}
+		j = delta
+	}
+	if x.before != magicptr || x.after != magicptr {
+		t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+	}
+}
+
 func TestAddInt32(t *testing.T) {
 	var x struct {
 		before int32
@@ -241,7 +379,7 @@ func TestCompareAndSwapInt64(t *testing.T) {
 	}
 }
 
-func TestCompareAndSwapUint64(t *testing.T) {
+func testCompareAndSwapUint64(t *testing.T, cas func(*uint64, uint64, uint64) bool) {
 	if test64err != nil {
 		t.Skipf("Skipping 64-bit tests: %v", test64err)
 	}
@@ -254,14 +392,14 @@ func TestCompareAndSwapUint64(t *testing.T) {
 	x.after = magic64
 	for val := uint64(1); val+val > val; val += val {
 		x.i = val
-		if !CompareAndSwapUint64(&x.i, val, val+1) {
+		if !cas(&x.i, val, val+1) {
 			t.Fatalf("should have swapped %#x %#x", val, val+1)
 		}
 		if x.i != val+1 {
 			t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
 		}
 		x.i = val + 1
-		if CompareAndSwapUint64(&x.i, val, val+2) {
+		if cas(&x.i, val, val+2) {
 			t.Fatalf("should not have swapped %#x %#x", val, val+2)
 		}
 		if x.i != val+1 {
@@ -273,6 +411,10 @@ func TestCompareAndSwapUint64(t *testing.T) {
 	}
 }
 
+func TestCompareAndSwapUint64(t *testing.T) {
+	testCompareAndSwapUint64(t, CompareAndSwapUint64)
+}
+
 func TestCompareAndSwapUintptr(t *testing.T) {
 	var x struct {
 		before uintptr
@@ -608,27 +750,85 @@ func TestStorePointer(t *testing.T) {
 // uses the atomic operation to add 1 to a value. After running
 // multiple hammers in parallel, check that we end with the correct
 // total.
-
-var hammer32 = []struct {
-	name string
-	f    func(*uint32, int)
-}{
-	{"AddInt32", hammerAddInt32},
-	{"AddUint32", hammerAddUint32},
-	{"AddUintptr", hammerAddUintptr32},
-	{"CompareAndSwapInt32", hammerCompareAndSwapInt32},
-	{"CompareAndSwapUint32", hammerCompareAndSwapUint32},
-	{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr32},
-	{"CompareAndSwapPointer", hammerCompareAndSwapPointer32},
+// Swap can't add 1, so it uses a different scheme.
+// The functions repeatedly generate a pseudo-random number such that
+// low bits are equal to high bits, swap, check that the old value
+// has low and high bits equal.
+
+var hammer32 = map[string]func(*uint32, int){
+	"SwapInt32":             hammerSwapInt32,
+	"SwapUint32":            hammerSwapUint32,
+	"SwapUintptr":           hammerSwapUintptr32,
+	"SwapPointer":           hammerSwapPointer32,
+	"AddInt32":              hammerAddInt32,
+	"AddUint32":             hammerAddUint32,
+	"AddUintptr":            hammerAddUintptr32,
+	"CompareAndSwapInt32":   hammerCompareAndSwapInt32,
+	"CompareAndSwapUint32":  hammerCompareAndSwapUint32,
+	"CompareAndSwapUintptr": hammerCompareAndSwapUintptr32,
+	"CompareAndSwapPointer": hammerCompareAndSwapPointer32,
 }
 
 func init() {
 	var v uint64 = 1 << 50
 	if uintptr(v) != 0 {
 		// 64-bit system; clear uintptr tests
-		hammer32[2].f = nil
-		hammer32[5].f = nil
-		hammer32[6].f = nil
+		delete(hammer32, "SwapUintptr")
+		delete(hammer32, "SwapPointer")
+		delete(hammer32, "AddUintptr")
+		delete(hammer32, "CompareAndSwapUintptr")
+		delete(hammer32, "CompareAndSwapPointer")
+	}
+}
+
+func hammerSwapInt32(uaddr *uint32, count int) {
+	addr := (*int32)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+		old := uint32(SwapInt32(addr, int32(new)))
+		if old>>16 != old<<16>>16 {
+			panic(fmt.Sprintf("SwapInt32 is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapUint32(addr *uint32, count int) {
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uint32(seed+i)<<16 | uint32(seed+i)<<16>>16
+		old := SwapUint32(addr, new)
+		if old>>16 != old<<16>>16 {
+			panic(fmt.Sprintf("SwapUint32 is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapUintptr32(uaddr *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	addr := (*uintptr)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
+		old := SwapUintptr(addr, new)
+		if old>>16 != old<<16>>16 {
+			panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapPointer32(uaddr *uint32, count int) {
+	// only safe when uintptr is 32-bit.
+	// not called on 64-bit systems.
+	addr := (*unsafe.Pointer)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uintptr(seed+i)<<16 | uintptr(seed+i)<<16>>16
+		old := uintptr(SwapPointer(addr, unsafe.Pointer(new)))
+		if old>>16 != old<<16>>16 {
+			panic(fmt.Sprintf("SwapPointer is not atomic: %v", old))
+		}
 	}
 }
 
@@ -713,47 +913,103 @@ func TestHammer32(t *testing.T) {
 	}
 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
 
-	for _, tt := range hammer32 {
-		if tt.f == nil {
-			continue
-		}
+	for name, testf := range hammer32 {
 		c := make(chan int)
 		var val uint32
 		for i := 0; i < p; i++ {
 			go func() {
-				tt.f(&val, n)
-				c <- 1
+				defer func() {
+					if err := recover(); err != nil {
+						t.Error(err.(string))
+					}
+					c <- 1
+				}()
+				testf(&val, n)
 			}()
 		}
 		for i := 0; i < p; i++ {
 			<-c
 		}
-		if val != uint32(n)*p {
-			t.Fatalf("%s: val=%d want %d", tt.name, val, n*p)
+		if !strings.HasPrefix(name, "Swap") && val != uint32(n)*p {
+			t.Fatalf("%s: val=%d want %d", name, val, n*p)
 		}
 	}
 }
 
-var hammer64 = []struct {
-	name string
-	f    func(*uint64, int)
-}{
-	{"AddInt64", hammerAddInt64},
-	{"AddUint64", hammerAddUint64},
-	{"AddUintptr", hammerAddUintptr64},
-	{"CompareAndSwapInt64", hammerCompareAndSwapInt64},
-	{"CompareAndSwapUint64", hammerCompareAndSwapUint64},
-	{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr64},
-	{"CompareAndSwapPointer", hammerCompareAndSwapPointer64},
+var hammer64 = map[string]func(*uint64, int){
+	"SwapInt64":             hammerSwapInt64,
+	"SwapUint64":            hammerSwapUint64,
+	"SwapUintptr":           hammerSwapUintptr64,
+	"SwapPointer":           hammerSwapPointer64,
+	"AddInt64":              hammerAddInt64,
+	"AddUint64":             hammerAddUint64,
+	"AddUintptr":            hammerAddUintptr64,
+	"CompareAndSwapInt64":   hammerCompareAndSwapInt64,
+	"CompareAndSwapUint64":  hammerCompareAndSwapUint64,
+	"CompareAndSwapUintptr": hammerCompareAndSwapUintptr64,
+	"CompareAndSwapPointer": hammerCompareAndSwapPointer64,
 }
 
 func init() {
 	var v uint64 = 1 << 50
 	if uintptr(v) == 0 {
 		// 32-bit system; clear uintptr tests
-		hammer64[2].f = nil
-		hammer64[5].f = nil
-		hammer64[6].f = nil
+		delete(hammer64, "SwapUintptr")
+		delete(hammer64, "SwapPointer")
+		delete(hammer64, "AddUintptr")
+		delete(hammer64, "CompareAndSwapUintptr")
+		delete(hammer64, "CompareAndSwapPointer")
+	}
+}
+
+func hammerSwapInt64(uaddr *uint64, count int) {
+	addr := (*int64)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+		old := uint64(SwapInt64(addr, int64(new)))
+		if old>>32 != old<<32>>32 {
+			panic(fmt.Sprintf("SwapInt64 is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapUint64(addr *uint64, count int) {
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uint64(seed+i)<<32 | uint64(seed+i)<<32>>32
+		old := SwapUint64(addr, new)
+		if old>>32 != old<<32>>32 {
+			panic(fmt.Sprintf("SwapUint64 is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapUintptr64(uaddr *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	addr := (*uintptr)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+		old := SwapUintptr(addr, new)
+		if old>>32 != old<<32>>32 {
+			panic(fmt.Sprintf("SwapUintptr is not atomic: %v", old))
+		}
+	}
+}
+
+func hammerSwapPointer64(uaddr *uint64, count int) {
+	// only safe when uintptr is 64-bit.
+	// not called on 32-bit systems.
+	addr := (*unsafe.Pointer)(unsafe.Pointer(uaddr))
+	seed := int(uintptr(unsafe.Pointer(&count)))
+	for i := 0; i < count; i++ {
+		new := uintptr(seed+i)<<32 | uintptr(seed+i)<<32>>32
+		old := uintptr(SwapPointer(addr, unsafe.Pointer(new)))
+		if old>>32 != old<<32>>32 {
+			panic(fmt.Sprintf("SwapPointer is not atomic: %v", old))
+		}
 	}
 }
 
@@ -841,23 +1097,25 @@ func TestHammer64(t *testing.T) {
 	}
 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
 
-	for _, tt := range hammer64 {
-		if tt.f == nil {
-			continue
-		}
+	for name, testf := range hammer64 {
 		c := make(chan int)
 		var val uint64
 		for i := 0; i < p; i++ {
 			go func() {
-				tt.f(&val, n)
-				c <- 1
+				defer func() {
+					if err := recover(); err != nil {
+						t.Error(err.(string))
+					}
+					c <- 1
+				}()
+				testf(&val, n)
 			}()
 		}
 		for i := 0; i < p; i++ {
 			<-c
 		}
-		if val != uint64(n)*p {
-			t.Fatalf("%s: val=%d want %d", tt.name, val, n*p)
+		if !strings.HasPrefix(name, "Swap") && val != uint64(n)*p {
+			t.Fatalf("%s: val=%d want %d", name, val, n*p)
 		}
 	}
 }
 
@@ -1205,3 +1463,46 @@ func TestUnaligned64(t *testing.T) {
 	shouldPanic(t, "CompareAndSwapUint64", func() { CompareAndSwapUint64(p, 1, 2) })
 	shouldPanic(t, "AddUint64", func() { AddUint64(p, 3) })
 }
+
+func TestNilDeref(t *testing.T) {
+	funcs := [...]func(){
+		func() { CompareAndSwapInt32(nil, 0, 0) },
+		func() { CompareAndSwapInt64(nil, 0, 0) },
+		func() { CompareAndSwapUint32(nil, 0, 0) },
+		func() { CompareAndSwapUint64(nil, 0, 0) },
+		func() { CompareAndSwapUintptr(nil, 0, 0) },
+		func() { CompareAndSwapPointer(nil, nil, nil) },
+		func() { SwapInt32(nil, 0) },
+		func() { SwapUint32(nil, 0) },
+		func() { SwapInt64(nil, 0) },
+		func() { SwapUint64(nil, 0) },
+		func() { SwapUintptr(nil, 0) },
+		func() { SwapPointer(nil, nil) },
+		func() { AddInt32(nil, 0) },
+		func() { AddUint32(nil, 0) },
+		func() { AddInt64(nil, 0) },
+		func() { AddUint64(nil, 0) },
+		func() { AddUintptr(nil, 0) },
+		func() { LoadInt32(nil) },
+		func() { LoadInt64(nil) },
+		func() { LoadUint32(nil) },
+		func() { LoadUint64(nil) },
+		func() { LoadUintptr(nil) },
+		func() { LoadPointer(nil) },
+		func() { StoreInt32(nil, 0) },
+		func() { StoreInt64(nil, 0) },
+		func() { StoreUint32(nil, 0) },
+		func() { StoreUint64(nil, 0) },
+		func() { StoreUintptr(nil, 0) },
+		func() { StorePointer(nil, nil) },
+	}
+	for _, f := range funcs {
+		func() {
+			defer func() {
+				runtime.GC()
+				recover()
+			}()
+			f()
+		}()
+	}
+}
diff --git a/libgo/go/sync/atomic/doc.go b/libgo/go/sync/atomic/doc.go
index 27a12c9..17ba72f 100644
--- a/libgo/go/sync/atomic/doc.go
+++ b/libgo/go/sync/atomic/doc.go
@@ -13,6 +13,13 @@
 //	Share memory by communicating;
 //	don't communicate by sharing memory.
 //
+// The swap operation, implemented by the SwapT functions, is the atomic
+// equivalent of:
+//
+//	old = *addr
+//	*addr = new
+//	return old
+//
 // The compare-and-swap operation, implemented by the CompareAndSwapT
 // functions, is the atomic equivalent of:
 //
@@ -40,11 +47,31 @@ import (
 // BUG(rsc): On x86-32, the 64-bit functions use instructions unavailable before the Pentium MMX.
 //
+// On non-Linux ARM, the 64-bit functions use instructions unavailable before the ARMv6k core.
+//
 // On both ARM and x86-32, it is the caller's responsibility to arrange for 64-bit
 // alignment of 64-bit words accessed atomically. The first word in a global
 // variable or in an allocated struct or slice can be relied upon to be
 // 64-bit aligned.
 
+// SwapInt32 atomically stores new into *addr and returns the previous *addr value.
+func SwapInt32(addr *int32, new int32) (old int32)
+
+// SwapInt64 atomically stores new into *addr and returns the previous *addr value.
+func SwapInt64(addr *int64, new int64) (old int64)
+
+// SwapUint32 atomically stores new into *addr and returns the previous *addr value.
+func SwapUint32(addr *uint32, new uint32) (old uint32)
+
+// SwapUint64 atomically stores new into *addr and returns the previous *addr value.
+func SwapUint64(addr *uint64, new uint64) (old uint64)
+
+// SwapUintptr atomically stores new into *addr and returns the previous *addr value.
+func SwapUintptr(addr *uintptr, new uintptr) (old uintptr)
+
+// SwapPointer atomically stores new into *addr and returns the previous *addr value.
+func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer)
+
 // CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
 func CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)
 
@@ -67,12 +94,16 @@ func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
 func AddInt32(addr *int32, delta int32) (new int32)
 
 // AddUint32 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint32(&x, ^uint32(c-1)).
+// In particular, to decrement x, do AddUint32(&x, ^uint32(0)).
 func AddUint32(addr *uint32, delta uint32) (new uint32)
 
 // AddInt64 atomically adds delta to *addr and returns the new value.
 func AddInt64(addr *int64, delta int64) (new int64)
 
 // AddUint64 atomically adds delta to *addr and returns the new value.
+// To subtract a signed positive constant value c from x, do AddUint64(&x, ^uint64(c-1)).
+// In particular, to decrement x, do AddUint64(&x, ^uint64(0)).
 func AddUint64(addr *uint64, delta uint64) (new uint64)
 
 // AddUintptr atomically adds delta to *addr and returns the new value.
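Editor's note: the doc.go hunk above documents the semantics of the new SwapT functions and the two's-complement subtraction idiom for AddUint32/AddUint64. The following standalone sketch is not part of the patch; it assumes a toolchain whose sync/atomic already exports Swap (Go 1.2, or a libgo with this change applied) and demonstrates both points:

	// swapdemo: exercises SwapInt32 and the AddUint32 decrement idiom
	// described in the doc.go comments above.
	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		// Swap: atomically store a new value and receive the old one.
		var state int32 = 1
		old := atomic.SwapInt32(&state, 2) // state becomes 2, old is 1
		fmt.Println(old, state)            // prints: 1 2

		// Unsigned subtraction via two's complement, exactly as the
		// doc comment prescribes: AddUint32(&x, ^uint32(c-1)) subtracts c.
		var x uint32 = 10
		atomic.AddUint32(&x, ^uint32(3-1)) // subtract 3
		atomic.AddUint32(&x, ^uint32(0))   // decrement by 1
		fmt.Println(x)                     // prints: 6
	}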
diff --git a/libgo/go/sync/atomic/race.go b/libgo/go/sync/atomic/race.go
index 2320b57..6cbbf12 100644
--- a/libgo/go/sync/atomic/race.go
+++ b/libgo/go/sync/atomic/race.go
@@ -20,6 +20,54 @@ import (
 
 var mtx uint32 = 1 // same for all
 
+func SwapInt32(addr *int32, new int32) (old int32) {
+	return int32(SwapUint32((*uint32)(unsafe.Pointer(addr)), uint32(new)))
+}
+
+func SwapUint32(addr *uint32, new uint32) (old uint32) {
+	_ = *addr
+	runtime.RaceSemacquire(&mtx)
+	runtime.RaceRead(unsafe.Pointer(addr))
+	runtime.RaceAcquire(unsafe.Pointer(addr))
+	old = *addr
+	*addr = new
+	runtime.RaceReleaseMerge(unsafe.Pointer(addr))
+	runtime.RaceSemrelease(&mtx)
+	return
+}
+
+func SwapInt64(addr *int64, new int64) (old int64) {
+	return int64(SwapUint64((*uint64)(unsafe.Pointer(addr)), uint64(new)))
+}
+
+func SwapUint64(addr *uint64, new uint64) (old uint64) {
+	_ = *addr
+	runtime.RaceSemacquire(&mtx)
+	runtime.RaceRead(unsafe.Pointer(addr))
+	runtime.RaceAcquire(unsafe.Pointer(addr))
+	old = *addr
+	*addr = new
+	runtime.RaceReleaseMerge(unsafe.Pointer(addr))
+	runtime.RaceSemrelease(&mtx)
+	return
+}
+
+func SwapUintptr(addr *uintptr, new uintptr) (old uintptr) {
+	return uintptr(SwapPointer((*unsafe.Pointer)(unsafe.Pointer(addr)), unsafe.Pointer(new)))
+}
+
+func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer) {
+	_ = *addr
+	runtime.RaceSemacquire(&mtx)
+	runtime.RaceRead(unsafe.Pointer(addr))
+	runtime.RaceAcquire(unsafe.Pointer(addr))
+	old = *addr
+	*addr = new
+	runtime.RaceReleaseMerge(unsafe.Pointer(addr))
+	runtime.RaceSemrelease(&mtx)
+	return
+}
+
 func CompareAndSwapInt32(val *int32, old, new int32) bool {
 	return CompareAndSwapUint32((*uint32)(unsafe.Pointer(val)), uint32(old), uint32(new))
 }
diff --git a/libgo/go/sync/cond.go b/libgo/go/sync/cond.go
index 13547a8..9e6bc17 100644
--- a/libgo/go/sync/cond.go
+++ b/libgo/go/sync/cond.go
@@ -4,6 +4,11 @@
 
 package sync
 
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
 // Cond implements a condition variable, a rendezvous point
 // for goroutines waiting for or announcing the occurrence
 // of an event.
@@ -11,27 +16,16 @@ package sync
 // Each Cond has an associated Locker L (often a *Mutex or *RWMutex),
 // which must be held when changing the condition and
 // when calling the Wait method.
+//
+// A Cond can be created as part of other structures.
+// A Cond must not be copied after first use.
 type Cond struct {
-	L Locker // held while observing or changing the condition
-	m Mutex  // held to avoid internal races
-
-	// We must be careful to make sure that when Signal
-	// releases a semaphore, the corresponding acquire is
-	// executed by a goroutine that was already waiting at
-	// the time of the call to Signal, not one that arrived later.
-	// To ensure this, we segment waiting goroutines into
-	// generations punctuated by calls to Signal. Each call to
-	// Signal begins another generation if there are no goroutines
-	// left in older generations for it to wake. Because of this
-	// optimization (only begin another generation if there
-	// are no older goroutines left), we only need to keep track
-	// of the two most recent generations, which we call old
-	// and new.
-	oldWaiters int     // number of waiters in old generation...
-	oldSema    *uint32 // ... waiting on this semaphore
+	// L is held while observing or changing the condition
+	L Locker
 
-	newWaiters int     // number of waiters in new generation...
-	newSema    *uint32 // ... waiting on this semaphore
+	sema    syncSema
+	waiters uint32 // number of waiters
+	checker copyChecker
 }
 
 // NewCond returns a new Cond with Locker l.
@@ -56,22 +50,16 @@ func NewCond(l Locker) *Cond {
 //	c.L.Unlock()
 //
 func (c *Cond) Wait() {
+	c.checker.check()
 	if raceenabled {
-		_ = c.m.state
 		raceDisable()
 	}
-	c.m.Lock()
-	if c.newSema == nil {
-		c.newSema = new(uint32)
-	}
-	s := c.newSema
-	c.newWaiters++
-	c.m.Unlock()
+	atomic.AddUint32(&c.waiters, 1)
 	if raceenabled {
 		raceEnable()
 	}
 	c.L.Unlock()
-	runtime_Semacquire(s)
+	runtime_Syncsemacquire(&c.sema)
 	c.L.Lock()
 }
 
@@ -80,26 +68,7 @@ func (c *Cond) Wait() {
 // It is allowed but not required for the caller to hold c.L
 // during the call.
 func (c *Cond) Signal() {
-	if raceenabled {
-		_ = c.m.state
-		raceDisable()
-	}
-	c.m.Lock()
-	if c.oldWaiters == 0 && c.newWaiters > 0 {
-		// Retire old generation; rename new to old.
-		c.oldWaiters = c.newWaiters
-		c.oldSema = c.newSema
-		c.newWaiters = 0
-		c.newSema = nil
-	}
-	if c.oldWaiters > 0 {
-		c.oldWaiters--
-		runtime_Semrelease(c.oldSema)
-	}
-	c.m.Unlock()
-	if raceenabled {
-		raceEnable()
-	}
+	c.signalImpl(false)
 }
 
 // Broadcast wakes all goroutines waiting on c.
@@ -107,27 +76,43 @@ func (c *Cond) Signal() {
 // It is allowed but not required for the caller to hold c.L
 // during the call.
 func (c *Cond) Broadcast() {
+	c.signalImpl(true)
+}
+
+func (c *Cond) signalImpl(all bool) {
+	c.checker.check()
 	if raceenabled {
-		_ = c.m.state
 		raceDisable()
 	}
-	c.m.Lock()
-	// Wake both generations.
-	if c.oldWaiters > 0 {
-		for i := 0; i < c.oldWaiters; i++ {
-			runtime_Semrelease(c.oldSema)
+	for {
+		old := atomic.LoadUint32(&c.waiters)
+		if old == 0 {
+			if raceenabled {
+				raceEnable()
+			}
+			return
 		}
-		c.oldWaiters = 0
-	}
-	if c.newWaiters > 0 {
-		for i := 0; i < c.newWaiters; i++ {
-			runtime_Semrelease(c.newSema)
+		new := old - 1
+		if all {
+			new = 0
+		}
+		if atomic.CompareAndSwapUint32(&c.waiters, old, new) {
+			if raceenabled {
+				raceEnable()
+			}
+			runtime_Syncsemrelease(&c.sema, old-new)
+			return
 		}
-		c.newWaiters = 0
-		c.newSema = nil
 	}
-	c.m.Unlock()
-	if raceenabled {
-		raceEnable()
+}
+
+// copyChecker holds back pointer to itself to detect object copying.
+type copyChecker uintptr
+
+func (c *copyChecker) check() {
+	if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
+		!atomic.CompareAndSwapUintptr((*uintptr)(c), 0, uintptr(unsafe.Pointer(c))) &&
+		uintptr(*c) != uintptr(unsafe.Pointer(c)) {
+		panic("sync.Cond is copied")
 	}
 }
diff --git a/libgo/go/sync/cond_test.go b/libgo/go/sync/cond_test.go
index cefacb1..467c806 100644
--- a/libgo/go/sync/cond_test.go
+++ b/libgo/go/sync/cond_test.go
@@ -5,6 +5,8 @@
 package sync_test
 
 import (
 	. "sync"
+
+	"runtime"
 	"testing"
 )
@@ -124,3 +126,130 @@ func TestCondBroadcast(t *testing.T) {
 	}
 	c.Broadcast()
 }
+
+func TestRace(t *testing.T) {
+	x := 0
+	c := NewCond(&Mutex{})
+	done := make(chan bool)
+	go func() {
+		c.L.Lock()
+		x = 1
+		c.Wait()
+		if x != 2 {
+			t.Fatal("want 2")
+		}
+		x = 3
+		c.Signal()
+		c.L.Unlock()
+		done <- true
+	}()
+	go func() {
+		c.L.Lock()
+		for {
+			if x == 1 {
+				x = 2
+				c.Signal()
+				break
+			}
+			c.L.Unlock()
+			runtime.Gosched()
+			c.L.Lock()
+		}
+		c.L.Unlock()
+		done <- true
+	}()
+	go func() {
+		c.L.Lock()
+		for {
+			if x == 2 {
+				c.Wait()
+				if x != 3 {
+					t.Fatal("want 3")
+				}
+				break
+			}
+			if x == 3 {
+				break
+			}
+			c.L.Unlock()
+			runtime.Gosched()
+			c.L.Lock()
+		}
+		c.L.Unlock()
+		done <- true
+	}()
+	<-done
+	<-done
+	<-done
+}
+
+func TestCondCopy(t *testing.T) {
+	defer func() {
+		err := recover()
+		if err == nil || err.(string) != "sync.Cond is copied" {
+			t.Fatalf("got %v, expect sync.Cond is copied", err)
+		}
+	}()
+	c := Cond{L: &Mutex{}}
+	c.Signal()
+	c2 := c
+	c2.Signal()
+}
+
+func BenchmarkCond1(b *testing.B) {
+	benchmarkCond(b, 1)
+}
+
+func BenchmarkCond2(b *testing.B) {
+	benchmarkCond(b, 2)
+}
+
+func BenchmarkCond4(b *testing.B) {
+	benchmarkCond(b, 4)
+}
+
+func BenchmarkCond8(b *testing.B) {
+	benchmarkCond(b, 8)
+}
+
+func BenchmarkCond16(b *testing.B) {
+	benchmarkCond(b, 16)
+}
+
+func BenchmarkCond32(b *testing.B) {
+	benchmarkCond(b, 32)
+}
+
+func benchmarkCond(b *testing.B, waiters int) {
+	c := NewCond(&Mutex{})
+	done := make(chan bool)
+	id := 0
+
+	for routine := 0; routine < waiters+1; routine++ {
+		go func() {
+			for i := 0; i < b.N; i++ {
+				c.L.Lock()
+				if id == -1 {
+					c.L.Unlock()
+					break
+				}
+				id++
+				if id == waiters+1 {
+					id = 0
+					c.Broadcast()
+				} else {
+					c.Wait()
+				}
+				c.L.Unlock()
+			}
+			c.L.Lock()
+			id = -1
+			c.Broadcast()
+			c.L.Unlock()
+			done <- true
+		}()
+	}
+	for routine := 0; routine < waiters+1; routine++ {
+		<-done
+	}
+}
diff --git a/libgo/go/sync/example_test.go b/libgo/go/sync/example_test.go
index 031c87f..bdd3af6 100644
--- a/libgo/go/sync/example_test.go
+++ b/libgo/go/sync/example_test.go
@@ -6,10 +6,15 @@ package sync_test
 
 import (
 	"fmt"
-	"net/http"
 	"sync"
 )
 
+type httpPkg struct{}
+
+func (httpPkg) Get(url string) {}
+
+var http httpPkg
+
 // This example fetches several URLs concurrently,
 // using a WaitGroup to block until all the fetches are complete.
 func ExampleWaitGroup() {
diff --git a/libgo/go/sync/once.go b/libgo/go/sync/once.go
index 1699e86..161ae3b 100644
--- a/libgo/go/sync/once.go
+++ b/libgo/go/sync/once.go
@@ -14,8 +14,8 @@ type Once struct {
 	done uint32
 }
 
-// Do calls the function f if and only if the method is being called for the
-// first time with this receiver. In other words, given
+// Do calls the function f if and only if Do is being called for the
+// first time for this instance of Once. In other words, given
 //	var once Once
 // if once.Do(f) is called multiple times, only the first call will invoke f,
 // even if f has a different value in each invocation. A new instance of
diff --git a/libgo/go/sync/race.go b/libgo/go/sync/race.go
index d9431af..fd0277d 100644
--- a/libgo/go/sync/race.go
+++ b/libgo/go/sync/race.go
@@ -32,3 +32,11 @@ func raceDisable() {
 func raceEnable() {
 	runtime.RaceEnable()
 }
+
+func raceRead(addr unsafe.Pointer) {
+	runtime.RaceRead(addr)
+}
+
+func raceWrite(addr unsafe.Pointer) {
+	runtime.RaceWrite(addr)
+}
diff --git a/libgo/go/sync/race0.go b/libgo/go/sync/race0.go
index bef14f9..65ada1c 100644
--- a/libgo/go/sync/race0.go
+++ b/libgo/go/sync/race0.go
@@ -26,3 +26,9 @@ func raceDisable() {
 
 func raceEnable() {
 }
+
+func raceRead(addr unsafe.Pointer) {
+}
+
+func raceWrite(addr unsafe.Pointer) {
+}
diff --git a/libgo/go/sync/runtime.go b/libgo/go/sync/runtime.go
index e99599c..3bf47ea 100644
--- a/libgo/go/sync/runtime.go
+++ b/libgo/go/sync/runtime.go
@@ -4,6 +4,8 @@
 
 package sync
 
+import "unsafe"
+
 // defined in package runtime
 
 // Semacquire waits until *s > 0 and then atomically decrements it.
@@ -16,3 +18,19 @@ func runtime_Semacquire(s *uint32)
 // It is intended as a simple wakeup primitive for use by the synchronization
 // library and should not be used directly.
 func runtime_Semrelease(s *uint32)
+
+// Opaque representation of SyncSema in runtime/sema.goc.
+type syncSema [3]uintptr
+
+// Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
+func runtime_Syncsemacquire(s *syncSema)
+
+// Syncsemrelease waits for n pairing Syncsemacquire on the same semaphore s.
+func runtime_Syncsemrelease(s *syncSema, n uint32)
+
+// Ensure that sync and runtime agree on size of syncSema.
+func runtime_Syncsemcheck(size uintptr)
+func init() {
+	var s syncSema
+	runtime_Syncsemcheck(unsafe.Sizeof(s))
+}
diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go
index ca38837..2268111 100644
--- a/libgo/go/sync/waitgroup.go
+++ b/libgo/go/sync/waitgroup.go
@@ -43,12 +43,23 @@ type WaitGroup struct {
 // other event to be waited for. See the WaitGroup example.
 func (wg *WaitGroup) Add(delta int) {
 	if raceenabled {
-		_ = wg.m.state
-		raceReleaseMerge(unsafe.Pointer(wg))
+		_ = wg.m.state // trigger nil deref early
+		if delta < 0 {
+			// Synchronize decrements with Wait.
+			raceReleaseMerge(unsafe.Pointer(wg))
+		}
 		raceDisable()
 		defer raceEnable()
 	}
 	v := atomic.AddInt32(&wg.counter, int32(delta))
+	if raceenabled {
+		if delta > 0 && v == int32(delta) {
+			// The first increment must be synchronized with Wait.
+			// Need to model this as a read, because there can be
+			// several concurrent wg.counter transitions from 0.
+			raceRead(unsafe.Pointer(&wg.sema))
+		}
+	}
 	if v < 0 {
 		panic("sync: negative WaitGroup counter")
 	}
@@ -72,7 +83,7 @@ func (wg *WaitGroup) Done() {
 // Wait blocks until the WaitGroup counter is zero.
 func (wg *WaitGroup) Wait() {
 	if raceenabled {
-		_ = wg.m.state
+		_ = wg.m.state // trigger nil deref early
 		raceDisable()
 	}
 	if atomic.LoadInt32(&wg.counter) == 0 {
@@ -83,7 +94,7 @@ func (wg *WaitGroup) Wait() {
 		return
 	}
 	wg.m.Lock()
-	atomic.AddInt32(&wg.waiters, 1)
+	w := atomic.AddInt32(&wg.waiters, 1)
 	// This code is racing with the unlocked path in Add above.
 	// The code above modifies counter and then reads waiters.
 	// We must modify waiters and then read counter (the opposite order)
@@ -101,6 +112,13 @@ func (wg *WaitGroup) Wait() {
 		}
 		return
 	}
+	if raceenabled && w == 1 {
+		// Wait must be synchronized with the first Add.
+		// Need to model this is as a write to race with the read in Add.
+		// As a consequence, can do the write only for the first waiter,
+		// otherwise concurrent Waits will race with each other.
+		raceWrite(unsafe.Pointer(&wg.sema))
+	}
 	if wg.sema == nil {
 		wg.sema = new(uint32)
 	}
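Editor's note: the 64bit_arm.go hunk emulates a 64-bit swap with a compare-and-swap retry loop, since the targeted ARM cores have no native 64-bit exchange. A user-level sketch of the same technique, with the hypothetical helper swapViaCAS standing in for the runtime-internal swapUint64 (which may read *addr directly; ordinary code should go through LoadUint64):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// swapViaCAS emulates an atomic exchange using only CompareAndSwap:
	// read the current value, then publish new only if *addr is still
	// that value; otherwise another writer intervened, so retry.
	func swapViaCAS(addr *uint64, new uint64) (old uint64) {
		for {
			old = atomic.LoadUint64(addr)
			if atomic.CompareAndSwapUint64(addr, old, new) {
				return old
			}
		}
	}

	func main() {
		var v uint64 = 7
		fmt.Println(swapViaCAS(&v, 42), v) // prints: 7 42
	}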
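Editor's note: as the atomic_test.go comments explain, Swap cannot reuse the add-1 counting check, so each hammerSwap function only ever stores words whose low half equals their high half and asserts that invariant on every value swapped back out; a torn exchange would mix halves from two writers. A condensed sketch of that invariant check (a mask instead of the shift pair used in the patch, which is equivalent):

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		var word uint32 // zero value trivially has equal halves
		var wg sync.WaitGroup
		for g := 0; g < 4; g++ {
			wg.Add(1)
			go func(seed uint32) {
				defer wg.Done()
				for i := uint32(0); i < 100000; i++ {
					v := (seed + i) & 0xffff
					// Store a value whose halves match; the value we
					// displace must also have matching halves.
					old := atomic.SwapUint32(&word, v<<16|v)
					if old>>16 != old&0xffff {
						panic(fmt.Sprintf("torn swap: %#x", old))
					}
				}
			}(uint32(g) * 1000)
		}
		wg.Wait()
		fmt.Println("ok")
	}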
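Editor's note: the copyChecker added to cond.go latches its own address on first use; if the containing Cond is later copied, the latched address no longer matches the checker's new address and check panics. The triple condition tolerates a racing first use: a failed CAS is harmless as long as some caller latched the correct address. A standalone sketch of the technique — Gate is a made-up example type, not part of the patch:

	package main

	import (
		"fmt"
		"sync/atomic"
		"unsafe"
	)

	type copyChecker uintptr

	func (c *copyChecker) check() {
		// 1. Fast path: already latched to this address.
		// 2. First use: try to latch our own address with a CAS.
		// 3. Re-check: a racing goroutine may have latched it meanwhile.
		if uintptr(*c) != uintptr(unsafe.Pointer(c)) &&
			!atomic.CompareAndSwapUintptr((*uintptr)(c), 0, uintptr(unsafe.Pointer(c))) &&
			uintptr(*c) != uintptr(unsafe.Pointer(c)) {
			panic("object is copied")
		}
	}

	type Gate struct {
		checker copyChecker
		open    bool
	}

	func (g *Gate) Open() {
		g.checker.check() // panics if g is a copy of a used Gate
		g.open = true
	}

	func main() {
		var g Gate
		g.Open()
		g2 := g // copying after first use poisons the checker
		defer func() { fmt.Println("recovered:", recover()) }()
		g2.Open() // panics: checker holds g's address, not g2's
	}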
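Editor's note: the Wait documentation quoted in the cond.go hunk prescribes re-checking the condition in a loop, because Wait unlocks c.L, blocks, and relocks before returning, and the condition may have changed again by then. The canonical pattern, unchanged by this rewrite:

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		c := sync.NewCond(&sync.Mutex{})
		ready := false

		go func() {
			c.L.Lock()
			ready = true // change the condition under the lock
			c.L.Unlock()
			c.Signal()
		}()

		c.L.Lock()
		for !ready { // re-check after every wakeup
			c.Wait()
		}
		c.L.Unlock()
		fmt.Println("condition observed")
	}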
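Editor's note: runtime.go mirrors the runtime's SyncSema as an opaque [3]uintptr and has init verify the size via runtime_Syncsemcheck, so a layout change on either side fails at startup instead of corrupting memory. A sketch of that size-assertion idea in plain Go, with the hypothetical checkSize standing in for the runtime hook:

	package main

	import (
		"fmt"
		"unsafe"
	)

	// syncSema mirrors an opaque structure owned by another component.
	type syncSema [3]uintptr

	// checkSize stands in for runtime_Syncsemcheck: it fails loudly if
	// the two sides disagree about the structure's size.
	func checkSize(size uintptr) {
		const want = 3 * unsafe.Sizeof(uintptr(0))
		if size != want {
			panic(fmt.Sprintf("syncSema size mismatch: %d != %d", size, want))
		}
	}

	func init() {
		var s syncSema
		checkSize(unsafe.Sizeof(s)) // runs before main; fails fast
	}

	func main() {
		fmt.Println("sizes agree")
	}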