author    Ian Lance Taylor <ian@gcc.gnu.org>    2011-10-26 23:57:58 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>    2011-10-26 23:57:58 +0000
commit    d8f412571f8768df2d3239e72392dfeabbad1559 (patch)
tree      19d182df05ead7ff8ba7ee00a7d57555e1383fdf /libgo/go/sync
parent    e0c39d66d4f0607177b1cf8995dda56a667e07b3 (diff)
Update Go library to last weekly.
From-SVN: r180552
Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/atomic/atomic.c        | 143
-rw-r--r--  libgo/go/sync/atomic/atomic_test.go  | 621
-rw-r--r--  libgo/go/sync/atomic/doc.go          |  39
3 files changed, 759 insertions(+), 44 deletions(-)
diff --git a/libgo/go/sync/atomic/atomic.c b/libgo/go/sync/atomic/atomic.c
index 6660a7d..e5de5ee 100644
--- a/libgo/go/sync/atomic/atomic.c
+++ b/libgo/go/sync/atomic/atomic.c
@@ -51,6 +51,15 @@ CompareAndSwapUintptr (uintptr_t *val, uintptr_t old, uintptr_t new)
return __sync_bool_compare_and_swap (val, old, new);
}
+_Bool CompareAndSwapPointer (void **, void *, void *)
+ asm ("libgo_sync.atomic.CompareAndSwapPointer");
+
+_Bool
+CompareAndSwapPointer (void **val, void *old, void *new)
+{
+ return __sync_bool_compare_and_swap (val, old, new);
+}
+
int32_t AddInt32 (int32_t *, int32_t)
asm ("libgo_sync.atomic.AddInt32");
@@ -110,6 +119,20 @@ LoadInt32 (int32_t *addr)
return v;
}
+int64_t LoadInt64 (int64_t *addr)
+ asm ("libgo_sync.atomic.LoadInt64");
+
+int64_t
+LoadInt64 (int64_t *addr)
+{
+ int64_t v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, v))
+ v = *addr;
+ return v;
+}
+
uint32_t LoadUint32 (uint32_t *addr)
asm ("libgo_sync.atomic.LoadUint32");
@@ -123,3 +146,123 @@ LoadUint32 (uint32_t *addr)
v = *addr;
return v;
}
+
+uint64_t LoadUint64 (uint64_t *addr)
+ asm ("libgo_sync.atomic.LoadUint64");
+
+uint64_t
+LoadUint64 (uint64_t *addr)
+{
+ uint64_t v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, v))
+ v = *addr;
+ return v;
+}
+
+uintptr_t LoadUintptr (uintptr_t *addr)
+ asm ("libgo_sync.atomic.LoadUintptr");
+
+uintptr_t
+LoadUintptr (uintptr_t *addr)
+{
+ uintptr_t v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, v))
+ v = *addr;
+ return v;
+}
+
+void *LoadPointer (void **addr)
+ asm ("libgo_sync.atomic.LoadPointer");
+
+void *
+LoadPointer (void **addr)
+{
+ void *v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, v))
+ v = *addr;
+ return v;
+}
+
+void StoreInt32 (int32_t *addr, int32_t val)
+ asm ("libgo_sync.atomic.StoreInt32");
+
+void
+StoreInt32 (int32_t *addr, int32_t val)
+{
+ int32_t v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, val))
+ v = *addr;
+}
+
+void StoreInt64 (int64_t *addr, int64_t val)
+ asm ("libgo_sync.atomic.StoreInt64");
+
+void
+StoreInt64 (int64_t *addr, int64_t val)
+{
+ int64_t v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, val))
+ v = *addr;
+}
+
+void StoreUint32 (uint32_t *addr, uint32_t val)
+ asm ("libgo_sync.atomic.StoreUint32");
+
+void
+StoreUint32 (uint32_t *addr, uint32_t val)
+{
+ uint32_t v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, val))
+ v = *addr;
+}
+
+void StoreUint64 (uint64_t *addr, uint64_t val)
+ asm ("libgo_sync.atomic.StoreUint64");
+
+void
+StoreUint64 (uint64_t *addr, uint64_t val)
+{
+ uint64_t v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, val))
+ v = *addr;
+}
+
+void StoreUintptr (uintptr_t *addr, uintptr_t val)
+ asm ("libgo_sync.atomic.StoreUintptr");
+
+void
+StoreUintptr (uintptr_t *addr, uintptr_t val)
+{
+ uintptr_t v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, val))
+ v = *addr;
+}
+
+void StorePointer (void **addr, void *val)
+ asm ("libgo_sync.atomic.StorePointer");
+
+void
+StorePointer (void **addr, void *val)
+{
+ void *v;
+
+ v = *addr;
+ while (! __sync_bool_compare_and_swap (addr, v, val))
+ v = *addr;
+}
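The Store functions run the mirror-image loop: reread the current value and CAS it to val until the swap lands. That makes the store atomic with respect to concurrent CAS users and gives it a full barrier through the __sync builtin. A hedged Go sketch of the shape (store64 is a hypothetical name; CompareAndSwapInt64 is the real API):

    package casstore

    import "sync/atomic"

    // store64 emulates an atomic 64-bit store with a CAS retry loop,
    // the same shape as the C StoreInt64 above: retry the swap from
    // the freshly observed value to val until it succeeds.
    func store64(addr *int64, val int64) {
            for {
                    v := *addr // current value; may already be stale
                    if atomic.CompareAndSwapInt64(addr, v, val) {
                            return
                    }
            }
    }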
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
index 2229e58..02ee24b 100644
--- a/libgo/go/sync/atomic/atomic_test.go
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -164,17 +164,17 @@ func TestCompareAndSwapInt32(t *testing.T) {
for val := int32(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapInt32(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapInt32(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic32 || x.after != magic32 {
@@ -193,17 +193,17 @@ func TestCompareAndSwapUint32(t *testing.T) {
for val := uint32(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapUint32(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapUint32(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic32 || x.after != magic32 {
@@ -226,17 +226,17 @@ func TestCompareAndSwapInt64(t *testing.T) {
for val := int64(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapInt64(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapInt64(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic64 || x.after != magic64 {
@@ -259,17 +259,17 @@ func TestCompareAndSwapUint64(t *testing.T) {
for val := uint64(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapUint64(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapUint64(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magic64 || x.after != magic64 {
@@ -290,17 +290,48 @@ func TestCompareAndSwapUintptr(t *testing.T) {
for val := uintptr(1); val+val > val; val += val {
x.i = val
if !CompareAndSwapUintptr(&x.i, val, val+1) {
- t.Errorf("should have swapped %#x %#x", val, val+1)
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
x.i = val + 1
if CompareAndSwapUintptr(&x.i, val, val+2) {
- t.Errorf("should not have swapped %#x %#x", val, val+2)
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
}
if x.i != val+1 {
- t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for val := uintptr(1); val+val > val; val += val {
+ x.i = unsafe.Pointer(val)
+ if !CompareAndSwapPointer(&x.i, unsafe.Pointer(val), unsafe.Pointer(val+1)) {
+ t.Fatalf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != unsafe.Pointer(val+1) {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = unsafe.Pointer(val + 1)
+ if CompareAndSwapPointer(&x.i, unsafe.Pointer(val), unsafe.Pointer(val+2)) {
+ t.Fatalf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != unsafe.Pointer(val+1) {
+ t.Fatalf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
}
}
if x.before != magicptr || x.after != magicptr {
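The test above drives CompareAndSwapPointer with manufactured addresses; in real code the operation's typical role is lock-free publication, for example pushing onto an intrusive Treiber stack. A sketch under that assumption (the node type and push function are illustrative, not part of this patch):

    package stack

    import (
            "sync/atomic"
            "unsafe"
    )

    type node struct {
            next unsafe.Pointer // *node
            val  int
    }

    // push links n in front of the current head and CASes the head
    // pointer from old to n, retrying if another goroutine raced us.
    func push(head *unsafe.Pointer, n *node) {
            for {
                    old := atomic.LoadPointer(head)
                    n.next = old
                    if atomic.CompareAndSwapPointer(head, old, unsafe.Pointer(n)) {
                            return
                    }
            }
    }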
@@ -348,6 +379,236 @@ func TestLoadUint32(t *testing.T) {
}
}
+func TestLoadInt64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ x.before = magic64
+ x.after = magic64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := LoadInt64(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestLoadUint64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ x.before = magic64
+ x.after = magic64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := LoadUint64(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestLoadUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := LoadUintptr(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestLoadPointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := LoadPointer(&x.i)
+ if k != x.i {
+ t.Fatalf("delta=%d i=%d k=%d", delta, x.i, k)
+ }
+ x.i = unsafe.Pointer(uintptr(x.i) + delta)
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStoreInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := int32(0)
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ StoreInt32(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ v := uint32(0)
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ StoreUint32(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestStoreInt64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ x.before = magic64
+ x.after = magic64
+ v := int64(0)
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ StoreInt64(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestStoreUint64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ x.before = magic64
+ x.after = magic64
+ v := uint64(0)
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ StoreUint64(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestStoreUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ v := uintptr(0)
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ StoreUintptr(&x.i, v)
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v += delta
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestStorePointer(t *testing.T) {
+ var x struct {
+ before uintptr
+ i unsafe.Pointer
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ v := unsafe.Pointer(uintptr(0))
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ StorePointer(&x.i, unsafe.Pointer(v))
+ if x.i != v {
+ t.Fatalf("delta=%d i=%d v=%d", delta, x.i, v)
+ }
+ v = unsafe.Pointer(uintptr(v) + delta)
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
// Tests of correct behavior, with contention.
// (Is the function atomic?)
//
@@ -366,6 +627,7 @@ var hammer32 = []struct {
{"CompareAndSwapInt32", hammerCompareAndSwapInt32},
{"CompareAndSwapUint32", hammerCompareAndSwapUint32},
{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr32},
+ {"CompareAndSwapPointer", hammerCompareAndSwapPointer32},
}
func init() {
@@ -436,6 +698,20 @@ func hammerCompareAndSwapUintptr32(uval *uint32, count int) {
}
}
+func hammerCompareAndSwapPointer32(uval *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ val := (*unsafe.Pointer)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapPointer(val, v, unsafe.Pointer(uintptr(v)+1)) {
+ break
+ }
+ }
+ }
+}
+
func TestHammer32(t *testing.T) {
const p = 4
n := 100000
@@ -460,7 +736,7 @@ func TestHammer32(t *testing.T) {
<-c
}
if val != uint32(n)*p {
- t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
+ t.Fatalf("%s: val=%d want %d", tt.name, val, n*p)
}
}
}
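Every hammer body reduces to the classic CAS increment loop that TestHammer32 runs on four goroutines at once: load, attempt the swap, retry on contention; the final count proves no update was lost. The loop in isolation (casIncrement is a hypothetical helper):

    package hammer

    import "sync/atomic"

    // casIncrement bumps *addr by one with the retry pattern the
    // hammer functions stress: lose the race, reread, try again.
    func casIncrement(addr *uint32) uint32 {
            for {
                    old := atomic.LoadUint32(addr)
                    if atomic.CompareAndSwapUint32(addr, old, old+1) {
                            return old + 1
                    }
            }
    }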
@@ -475,6 +751,7 @@ var hammer64 = []struct {
{"CompareAndSwapInt64", hammerCompareAndSwapInt64},
{"CompareAndSwapUint64", hammerCompareAndSwapUint64},
{"CompareAndSwapUintptr", hammerCompareAndSwapUintptr64},
+ {"CompareAndSwapPointer", hammerCompareAndSwapPointer64},
}
func init() {
@@ -545,6 +822,20 @@ func hammerCompareAndSwapUintptr64(uval *uint64, count int) {
}
}
+func hammerCompareAndSwapPointer64(uval *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ val := (*unsafe.Pointer)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapPointer(val, v, unsafe.Pointer(uintptr(v)+1)) {
+ break
+ }
+ }
+ }
+}
+
func TestHammer64(t *testing.T) {
if test64err != nil {
t.Logf("Skipping 64-bit tests: %v", test64err)
@@ -573,63 +864,141 @@ func TestHammer64(t *testing.T) {
<-c
}
if val != uint64(n)*p {
- t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
+ t.Fatalf("%s: val=%d want %d", tt.name, val, n*p)
}
}
}
-func hammerLoadInt32(t *testing.T, uval *uint32) {
- val := (*int32)(unsafe.Pointer(uval))
- for {
- v := LoadInt32(val)
+func hammerStoreLoadInt32(t *testing.T, valp unsafe.Pointer) {
+ val := (*int32)(valp)
+ v := LoadInt32(val)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Int32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreInt32(val, new)
+}
+
+func hammerStoreLoadUint32(t *testing.T, valp unsafe.Pointer) {
+ val := (*uint32)(valp)
+ v := LoadUint32(val)
+ vlo := v & ((1 << 16) - 1)
+ vhi := v >> 16
+ if vlo != vhi {
+ t.Fatalf("Uint32: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<16
+ if vlo == 1e4 {
+ new = 0
+ }
+ StoreUint32(val, new)
+}
+
+func hammerStoreLoadInt64(t *testing.T, valp unsafe.Pointer) {
+ val := (*int64)(valp)
+ v := LoadInt64(val)
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Int64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ StoreInt64(val, new)
+}
+
+func hammerStoreLoadUint64(t *testing.T, valp unsafe.Pointer) {
+ val := (*uint64)(valp)
+ v := LoadUint64(val)
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uint64: %#x != %#x", vlo, vhi)
+ }
+ new := v + 1 + 1<<32
+ StoreUint64(val, new)
+}
+
+func hammerStoreLoadUintptr(t *testing.T, valp unsafe.Pointer) {
+ val := (*uintptr)(valp)
+ var test64 uint64 = 1 << 50
+ arch32 := uintptr(test64) == 0
+ v := LoadUintptr(val)
+ new := v
+ if arch32 {
vlo := v & ((1 << 16) - 1)
vhi := v >> 16
if vlo != vhi {
- t.Fatalf("LoadInt32: %#x != %#x", vlo, vhi)
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
}
- new := v + 1 + 1<<16
+ new = v + 1 + 1<<16
if vlo == 1e4 {
new = 0
}
- if CompareAndSwapInt32(val, v, new) {
- break
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Uintptr: %#x != %#x", vlo, vhi)
}
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
}
+ StoreUintptr(val, new)
}
-func hammerLoadUint32(t *testing.T, val *uint32) {
- for {
- v := LoadUint32(val)
+func hammerStoreLoadPointer(t *testing.T, valp unsafe.Pointer) {
+ val := (*unsafe.Pointer)(valp)
+ var test64 uint64 = 1 << 50
+ arch32 := uintptr(test64) == 0
+ v := uintptr(LoadPointer(val))
+ new := v
+ if arch32 {
vlo := v & ((1 << 16) - 1)
vhi := v >> 16
if vlo != vhi {
- t.Fatalf("LoadUint32: %#x != %#x", vlo, vhi)
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
}
- new := v + 1 + 1<<16
+ new = v + 1 + 1<<16
if vlo == 1e4 {
new = 0
}
- if CompareAndSwapUint32(val, v, new) {
- break
+ } else {
+ vlo := v & ((1 << 32) - 1)
+ vhi := v >> 32
+ if vlo != vhi {
+ t.Fatalf("Pointer: %#x != %#x", vlo, vhi)
}
+ inc := uint64(1 + 1<<32)
+ new = v + uintptr(inc)
}
+ StorePointer(val, unsafe.Pointer(new))
}
-func TestHammerLoad(t *testing.T) {
- tests := [...]func(*testing.T, *uint32){hammerLoadInt32, hammerLoadUint32}
- n := 100000
+func TestHammerStoreLoad(t *testing.T) {
+ var tests []func(*testing.T, unsafe.Pointer)
+ tests = append(tests, hammerStoreLoadInt32, hammerStoreLoadUint32,
+ hammerStoreLoadUintptr, hammerStoreLoadPointer)
+ if test64err == nil {
+ tests = append(tests, hammerStoreLoadInt64, hammerStoreLoadUint64)
+ }
+ n := int(1e6)
if testing.Short() {
- n = 10000
+ n = int(1e4)
}
const procs = 8
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(procs))
for _, tt := range tests {
c := make(chan int)
- var val uint32
+ var val uint64
for p := 0; p < procs; p++ {
go func() {
for i := 0; i < n; i++ {
- tt(t, &val)
+ tt(t, unsafe.Pointer(&val))
}
c <- 1
}()
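The hammerStoreLoad bodies all maintain one invariant: the word's two halves stay equal, and each step adds 1 to both halves at once (v + 1 + 1<<16 for 32-bit values, v + 1 + 1<<32 for 64-bit ones). A torn store or load mixes two generations, the halves disagree, and the vlo != vhi check fires. The arithmetic in miniature (hypothetical helpers):

    package tear

    // step advances a 32-bit value whose 16-bit halves must match:
    // adding 1 + 1<<16 increments both halves together.
    func step(v uint32) uint32 {
            return v + 1 + 1<<16
    }

    // halvesAgree reports whether the invariant still holds; a torn
    // 16-bit read or write of the word makes it false.
    func halvesAgree(v uint32) bool {
            return v&0xffff == v>>16
    }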
@@ -639,3 +1008,169 @@ func TestHammerLoad(t *testing.T) {
}
}
}
+
+func TestStoreLoadSeqCst32(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int32(1e3)
+ if testing.Short() {
+ N = int32(1e2)
+ }
+ c := make(chan bool, 2)
+ X := [2]int32{}
+ ack := [2][3]int32{{-1, -1, -1}, {-1, -1, -1}}
+ for p := 0; p < 2; p++ {
+ go func(me int) {
+ he := 1 - me
+ for i := int32(1); i < N; i++ {
+ StoreInt32(&X[me], i)
+ my := LoadInt32(&X[he])
+ StoreInt32(&ack[me][i%3], my)
+ for w := 1; LoadInt32(&ack[he][i%3]) == -1; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ his := LoadInt32(&ack[he][i%3])
+ if (my != i && my != i-1) || (his != i && his != i-1) {
+ t.Fatalf("invalid values: %d/%d (%d)", my, his, i)
+ }
+ if my != i && his != i {
+ t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ }
+ ack[me][(i-1)%3] = -1
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
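TestStoreLoadSeqCst32 is a store-buffering (Dekker-style) litmus test: each goroutine stores to its own slot, then loads the peer's. Under sequentially consistent atomics at least one goroutine must observe the other's store for round i, so both sides seeing only i-1 is the violation the test hunts for. The litmus reduced to one round (illustrative, not part of the test):

    package litmus

    import "sync/atomic"

    // storeBuffering runs one round of the store-buffering litmus.
    // With sequentially consistent Store/Load the outcome
    // r1 == 0 && r2 == 0 is impossible.
    func storeBuffering() (r1, r2 int32) {
            var x, y int32
            done := make(chan struct{}, 2)
            go func() {
                    atomic.StoreInt32(&x, 1)
                    r1 = atomic.LoadInt32(&y)
                    done <- struct{}{}
            }()
            go func() {
                    atomic.StoreInt32(&y, 1)
                    r2 = atomic.LoadInt32(&x)
                    done <- struct{}{}
            }()
            <-done
            <-done
            return
    }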
+func TestStoreLoadSeqCst64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int64(1e3)
+ if testing.Short() {
+ N = int64(1e2)
+ }
+ c := make(chan bool, 2)
+ X := [2]int64{}
+ ack := [2][3]int64{{-1, -1, -1}, {-1, -1, -1}}
+ for p := 0; p < 2; p++ {
+ go func(me int) {
+ he := 1 - me
+ for i := int64(1); i < N; i++ {
+ StoreInt64(&X[me], i)
+ my := LoadInt64(&X[he])
+ StoreInt64(&ack[me][i%3], my)
+ for w := 1; LoadInt64(&ack[he][i%3]) == -1; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ his := LoadInt64(&ack[he][i%3])
+ if (my != i && my != i-1) || (his != i && his != i-1) {
+ t.Fatalf("invalid values: %d/%d (%d)", my, his, i)
+ }
+ if my != i && his != i {
+ t.Fatalf("store/load are not sequentially consistent: %d/%d (%d)", my, his, i)
+ }
+ ack[me][(i-1)%3] = -1
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
+func TestStoreLoadRelAcq32(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int32(1e3)
+ if testing.Short() {
+ N = int32(1e2)
+ }
+ c := make(chan bool, 2)
+ type Data struct {
+ signal int32
+ pad1 [128]int8
+ data1 int32
+ pad2 [128]int8
+ data2 float32
+ }
+ var X Data
+ for p := int32(0); p < 2; p++ {
+ go func(p int32) {
+ for i := int32(1); i < N; i++ {
+ if (i+p)%2 == 0 {
+ X.data1 = i
+ X.data2 = float32(i)
+ StoreInt32(&X.signal, i)
+ } else {
+ for w := 1; LoadInt32(&X.signal) != i; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ d1 := X.data1
+ d2 := X.data2
+ if d1 != i || d2 != float32(i) {
+ t.Fatalf("incorrect data: %d/%d (%d)", d1, d2, i)
+ }
+ }
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
+
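The RelAcq tests exercise the publication idiom: the writer fills data1/data2 first and stores the signal last; once the reader's load observes signal == i, the data stores must be visible too. Its core, stripped of the test harness (payload, produce, and consume are illustrative names):

    package publish

    import "sync/atomic"

    type payload struct {
            signal int32
            data   int32
    }

    // produce writes the data first, then raises the signal; the
    // atomic store is the release edge of the hand-off.
    func produce(p *payload, v int32) {
            p.data = v
            atomic.StoreInt32(&p.signal, 1)
    }

    // consume spins until the signal is up; the atomic load is the
    // acquire edge, so the data write is visible afterwards.
    func consume(p *payload) int32 {
            for atomic.LoadInt32(&p.signal) == 0 {
            }
            return p.data
    }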
+func TestStoreLoadRelAcq64(t *testing.T) {
+ if test64err != nil {
+ t.Logf("Skipping 64-bit tests: %v", test64err)
+ return
+ }
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
+ N := int64(1e3)
+ if testing.Short() {
+ N = int64(1e2)
+ }
+ c := make(chan bool, 2)
+ type Data struct {
+ signal int64
+ pad1 [128]int8
+ data1 int64
+ pad2 [128]int8
+ data2 float64
+ }
+ var X Data
+ for p := int64(0); p < 2; p++ {
+ go func(p int64) {
+ for i := int64(1); i < N; i++ {
+ if (i+p)%2 == 0 {
+ X.data1 = i
+ X.data2 = float64(i)
+ StoreInt64(&X.signal, i)
+ } else {
+ for w := 1; LoadInt64(&X.signal) != i; w++ {
+ if w%1000 == 0 {
+ runtime.Gosched()
+ }
+ }
+ d1 := X.data1
+ d2 := X.data2
+ if d1 != i || d2 != float64(i) {
+ t.Fatalf("incorrect data: %d/%d (%d)", d1, d2, i)
+ }
+ }
+ }
+ c <- true
+ }(p)
+ }
+ <-c
+ <-c
+}
diff --git a/libgo/go/sync/atomic/doc.go b/libgo/go/sync/atomic/doc.go
index b35eb53..ecb4808 100644
--- a/libgo/go/sync/atomic/doc.go
+++ b/libgo/go/sync/atomic/doc.go
@@ -22,9 +22,13 @@
//
package atomic
+import (
+ "unsafe"
+)
+
// BUG(rsc): On ARM, the 64-bit functions use instructions unavailable before ARM 11.
//
-// On x86-32, the 64-bit functions use instructions unavailable before the Pentium.
+// On x86-32, the 64-bit functions use instructions unavailable before the Pentium MMX.
// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
func CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)
@@ -41,6 +45,9 @@ func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)
// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)
+// CompareAndSwapPointer executes the compare-and-swap operation for an unsafe.Pointer value.
+func CompareAndSwapPointer(val *unsafe.Pointer, old, new unsafe.Pointer) (swapped bool)
+
// AddInt32 atomically adds delta to *val and returns the new value.
func AddInt32(val *int32, delta int32) (new int32)
@@ -59,9 +66,39 @@ func AddUintptr(val *uintptr, delta uintptr) (new uintptr)
// LoadInt32 atomically loads *addr.
func LoadInt32(addr *int32) (val int32)
+// LoadInt64 atomically loads *addr.
+func LoadInt64(addr *int64) (val int64)
+
// LoadUint32 atomically loads *addr.
func LoadUint32(addr *uint32) (val uint32)
+// LoadUint64 atomically loads *addr.
+func LoadUint64(addr *uint64) (val uint64)
+
+// LoadUintptr atomically loads *addr.
+func LoadUintptr(addr *uintptr) (val uintptr)
+
+// LoadPointer atomically loads *addr.
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
+// StoreInt32 atomically stores val into *addr.
+func StoreInt32(addr *int32, val int32)
+
+// StoreInt64 atomically stores val into *addr.
+func StoreInt64(addr *int64, val int64)
+
+// StoreUint32 atomically stores val into *addr.
+func StoreUint32(addr *uint32, val uint32)
+
+// StoreUint64 atomically stores val into *addr.
+func StoreUint64(addr *uint64, val uint64)
+
+// StoreUintptr atomically stores val into *addr.
+func StoreUintptr(addr *uintptr, val uintptr)
+
+// StorePointer atomically stores val into *addr.
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
+
// Helper for ARM. Linker will discard on other systems
func panic64() {
panic("sync/atomic: broken 64-bit atomic operations (buggy QEMU)")