author     Ian Lance Taylor <ian@gcc.gnu.org>  2011-03-16 23:05:44 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>  2011-03-16 23:05:44 +0000
commit     5133f00ef8baab894d92de1e8b8baae59815a8b6 (patch)
tree       44176975832a3faf1626836e70c97d5edd674122 /libgo/go/sync
parent     f617201f55938fc89b532f2240bdf77bea946471 (diff)
Update to current version of Go library (revision 94d654be2064).
From-SVN: r171076
Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/atomic/atomic.c         97
-rw-r--r--  libgo/go/sync/atomic/atomic_test.go  506
-rw-r--r--  libgo/go/sync/atomic/doc.go           57
-rw-r--r--  libgo/go/sync/cond.go                 90
-rw-r--r--  libgo/go/sync/cond_test.go            99
-rw-r--r--  libgo/go/sync/mutex.go                42
-rw-r--r--  libgo/go/sync/mutex_test.go           13
-rw-r--r--  libgo/go/sync/once.go                  2
-rw-r--r--  libgo/go/sync/rwmutex.go              33
-rw-r--r--  libgo/go/sync/rwmutex_test.go         50
-rw-r--r--  libgo/go/sync/waitgroup.go            86
-rw-r--r--  libgo/go/sync/waitgroup_test.go       60
-rw-r--r--  libgo/go/sync/xadd_test.go             9
13 files changed, 1094 insertions, 50 deletions
diff --git a/libgo/go/sync/atomic/atomic.c b/libgo/go/sync/atomic/atomic.c
new file mode 100644
index 0000000..e2d9b24
--- /dev/null
+++ b/libgo/go/sync/atomic/atomic.c
@@ -0,0 +1,97 @@
+/* atomic.c -- implement atomic routines for Go.
+
+ Copyright 2011 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include <stdint.h>
+
+_Bool CompareAndSwapInt32 (int32_t *, int32_t, int32_t)
+ asm ("libgo_sync.atomic.CompareAndSwapInt32");
+
+_Bool
+CompareAndSwapInt32 (int32_t *val, int32_t old, int32_t new)
+{
+ return __sync_bool_compare_and_swap (val, old, new);
+}
+
+_Bool CompareAndSwapInt64 (int64_t *, int64_t, int64_t)
+ asm ("libgo_sync.atomic.CompareAndSwapInt64");
+
+_Bool
+CompareAndSwapInt64 (int64_t *val, int64_t old, int64_t new)
+{
+ return __sync_bool_compare_and_swap (val, old, new);
+}
+
+_Bool CompareAndSwapUint32 (uint32_t *, uint32_t, uint32_t)
+ asm ("libgo_sync.atomic.CompareAndSwapUint32");
+
+_Bool
+CompareAndSwapUint32 (uint32_t *val, uint32_t old, uint32_t new)
+{
+ return __sync_bool_compare_and_swap (val, old, new);
+}
+
+_Bool CompareAndSwapUint64 (uint64_t *, uint64_t, uint64_t)
+ asm ("libgo_sync.atomic.CompareAndSwapUint64");
+
+_Bool
+CompareAndSwapUint64 (uint64_t *val, uint64_t old, uint64_t new)
+{
+ return __sync_bool_compare_and_swap (val, old, new);
+}
+
+_Bool CompareAndSwapUintptr (uintptr_t *, uintptr_t, uintptr_t)
+ asm ("libgo_sync.atomic.CompareAndSwapUintptr");
+
+_Bool
+CompareAndSwapUintptr (uintptr_t *val, uintptr_t old, uintptr_t new)
+{
+ return __sync_bool_compare_and_swap (val, old, new);
+}
+
+int32_t AddInt32 (int32_t *, int32_t)
+ asm ("libgo_sync.atomic.AddInt32");
+
+int32_t
+AddInt32 (int32_t *val, int32_t delta)
+{
+ return __sync_add_and_fetch (val, delta);
+}
+
+uint32_t AddUint32 (uint32_t *, uint32_t)
+ asm ("libgo_sync.atomic.AddUint32");
+
+uint32_t
+AddUint32 (uint32_t *val, uint32_t delta)
+{
+ return __sync_add_and_fetch (val, delta);
+}
+
+int64_t AddInt64 (int64_t *, int64_t)
+ asm ("libgo_sync.atomic.AddInt64");
+
+int64_t
+AddInt64 (int64_t *val, int64_t delta)
+{
+ return __sync_add_and_fetch (val, delta);
+}
+
+uint64_t AddUint64 (uint64_t *, uint64_t)
+ asm ("libgo_sync.atomic.AddUint64");
+
+uint64_t
+AddUint64 (uint64_t *val, uint64_t delta)
+{
+ return __sync_add_and_fetch (val, delta);
+}
+
+uintptr_t AddUintptr (uintptr_t *, uintptr_t)
+ asm ("libgo_sync.atomic.AddUintptr");
+
+uintptr_t
+AddUintptr (uintptr_t *val, uintptr_t delta)
+{
+ return __sync_add_and_fetch (val, delta);
+}
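
The asm annotations above bind each C function to the mangled symbol name that gccgo expects for the Go-level sync/atomic package, so the Go declarations (see doc.go further down) resolve directly to these __sync builtins. A minimal sketch of the resulting Go-side behavior — ordinary sync/atomic usage, not code from this patch:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func main() {
        var n int32

        // AddInt32 maps to __sync_add_and_fetch above: it returns
        // the new value, not the old one.
        fmt.Println(atomic.AddInt32(&n, 5)) // 5

        // CompareAndSwapInt32 maps to __sync_bool_compare_and_swap:
        // the store happens only if n still holds the expected value.
        fmt.Println(atomic.CompareAndSwapInt32(&n, 5, 10)) // true; n is now 10
        fmt.Println(atomic.CompareAndSwapInt32(&n, 5, 99)) // false; n stays 10
    }
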
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
new file mode 100644
index 0000000..7b204b1
--- /dev/null
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -0,0 +1,506 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import (
+ "runtime"
+ "testing"
+ "unsafe"
+)
+
+// Tests of correct behavior, without contention.
+// (Does the function work as advertised?)
+//
+// Test that the Add functions add correctly.
+// Test that the CompareAndSwap functions actually
+// do the comparison and the swap correctly.
+//
+// The loop over power-of-two values is meant to
+// ensure that the operations apply to the full word size.
+// The struct fields x.before and x.after check that the
+// operations do not extend past the full word size.
+
+const (
+ magic32 = 0xdedbeef
+ magic64 = 0xdeddeadbeefbeef
+)
+
+func TestAddInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j int32
+ for delta := int32(1); delta+delta > delta; delta += delta {
+ k := AddInt32(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ var j uint32
+ for delta := uint32(1); delta+delta > delta; delta += delta {
+ k := AddUint32(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestAddInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ x.before = magic64
+ x.after = magic64
+ var j int64
+ for delta := int64(1); delta+delta > delta; delta += delta {
+ k := AddInt64(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, int64(magic64), int64(magic64))
+ }
+}
+
+func TestAddUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ x.before = magic64
+ x.after = magic64
+ var j uint64
+ for delta := uint64(1); delta+delta > delta; delta += delta {
+ k := AddUint64(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestAddUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ var j uintptr
+ for delta := uintptr(1); delta+delta > delta; delta += delta {
+ k := AddUintptr(&x.i, delta)
+ j += delta
+ if x.i != j || k != j {
+ t.Fatalf("delta=%d i=%d j=%d k=%d", delta, x.i, j, k)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+func TestCompareAndSwapInt32(t *testing.T) {
+ var x struct {
+ before int32
+ i int32
+ after int32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := int32(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapInt32(&x.i, val, val+1) {
+ t.Errorf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapInt32(&x.i, val, val+2) {
+ t.Errorf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapUint32(t *testing.T) {
+ var x struct {
+ before uint32
+ i uint32
+ after uint32
+ }
+ x.before = magic32
+ x.after = magic32
+ for val := uint32(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapUint32(&x.i, val, val+1) {
+ t.Errorf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapUint32(&x.i, val, val+2) {
+ t.Errorf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic32 || x.after != magic32 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magic32, magic32)
+ }
+}
+
+func TestCompareAndSwapInt64(t *testing.T) {
+ var x struct {
+ before int64
+ i int64
+ after int64
+ }
+ x.before = magic64
+ x.after = magic64
+ for val := int64(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapInt64(&x.i, val, val+1) {
+ t.Errorf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapInt64(&x.i, val, val+2) {
+ t.Errorf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestCompareAndSwapUint64(t *testing.T) {
+ var x struct {
+ before uint64
+ i uint64
+ after uint64
+ }
+ x.before = magic64
+ x.after = magic64
+ for val := uint64(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapUint64(&x.i, val, val+1) {
+ t.Errorf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapUint64(&x.i, val, val+2) {
+ t.Errorf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magic64 || x.after != magic64 {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
+ }
+}
+
+func TestCompareAndSwapUintptr(t *testing.T) {
+ var x struct {
+ before uintptr
+ i uintptr
+ after uintptr
+ }
+ var m uint64 = magic64
+ magicptr := uintptr(m)
+ x.before = magicptr
+ x.after = magicptr
+ for val := uintptr(1); val+val > val; val += val {
+ x.i = val
+ if !CompareAndSwapUintptr(&x.i, val, val+1) {
+ t.Errorf("should have swapped %#x %#x", val, val+1)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ x.i = val + 1
+ if CompareAndSwapUintptr(&x.i, val, val+2) {
+ t.Errorf("should not have swapped %#x %#x", val, val+2)
+ }
+ if x.i != val+1 {
+ t.Errorf("wrong x.i after swap: x.i=%#x val+1=%#x", x.i, val+1)
+ }
+ }
+ if x.before != magicptr || x.after != magicptr {
+ t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, magicptr, magicptr)
+ }
+}
+
+// Tests of correct behavior, with contention.
+// (Is the function atomic?)
+//
+// For each function, we write a "hammer" function that repeatedly
+// uses the atomic operation to add 1 to a value. After running
+// multiple hammers in parallel, check that we end with the correct
+// total.
+
+var hammer32 = []struct {
+ name string
+ f func(*uint32, int)
+}{
+ {"AddInt32", hammerAddInt32},
+ {"AddUint32", hammerAddUint32},
+ {"AddUintptr", hammerAddUintptr32},
+ {"CompareAndSwapInt32", hammerCompareAndSwapInt32},
+ {"CompareAndSwapUint32", hammerCompareAndSwapUint32},
+ {"CompareAndSwapUintptr", hammerCompareAndSwapUintptr32},
+}
+
+func init() {
+ var v uint64 = 1 << 50
+ if uintptr(v) != 0 {
+ // 64-bit system; clear uintptr tests
+ hammer32[2].f = nil
+ hammer32[5].f = nil
+ }
+}
+
+func hammerAddInt32(uval *uint32, count int) {
+ val := (*int32)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ AddInt32(val, 1)
+ }
+}
+
+func hammerAddUint32(val *uint32, count int) {
+ for i := 0; i < count; i++ {
+ AddUint32(val, 1)
+ }
+}
+
+func hammerAddUintptr32(uval *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ val := (*uintptr)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ AddUintptr(val, 1)
+ }
+}
+
+func hammerCompareAndSwapInt32(uval *uint32, count int) {
+ val := (*int32)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapInt32(val, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint32(val *uint32, count int) {
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapUint32(val, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr32(uval *uint32, count int) {
+ // only safe when uintptr is 32-bit.
+ // not called on 64-bit systems.
+ val := (*uintptr)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapUintptr(val, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func TestHammer32(t *testing.T) {
+ const (
+ n = 100000
+ p = 4
+ )
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+ for _, tt := range hammer32 {
+ if tt.f == nil {
+ continue
+ }
+ c := make(chan int)
+ var val uint32
+ for i := 0; i < p; i++ {
+ go func() {
+ tt.f(&val, n)
+ c <- 1
+ }()
+ }
+ for i := 0; i < p; i++ {
+ <-c
+ }
+ if val != n*p {
+ t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
+ }
+ }
+}
+
+var hammer64 = []struct {
+ name string
+ f func(*uint64, int)
+}{
+ {"AddInt64", hammerAddInt64},
+ {"AddUint64", hammerAddUint64},
+ {"AddUintptr", hammerAddUintptr64},
+ {"CompareAndSwapInt64", hammerCompareAndSwapInt64},
+ {"CompareAndSwapUint64", hammerCompareAndSwapUint64},
+ {"CompareAndSwapUintptr", hammerCompareAndSwapUintptr64},
+}
+
+func init() {
+ var v uint64 = 1 << 50
+ if uintptr(v) == 0 {
+ // 32-bit system; clear uintptr tests
+ hammer64[2].f = nil
+ hammer64[5].f = nil
+ }
+}
+
+func hammerAddInt64(uval *uint64, count int) {
+ val := (*int64)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ AddInt64(val, 1)
+ }
+}
+
+func hammerAddUint64(val *uint64, count int) {
+ for i := 0; i < count; i++ {
+ AddUint64(val, 1)
+ }
+}
+
+func hammerAddUintptr64(uval *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ val := (*uintptr)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ AddUintptr(val, 1)
+ }
+}
+
+func hammerCompareAndSwapInt64(uval *uint64, count int) {
+ val := (*int64)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapInt64(val, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUint64(val *uint64, count int) {
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapUint64(val, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func hammerCompareAndSwapUintptr64(uval *uint64, count int) {
+ // only safe when uintptr is 64-bit.
+ // not called on 32-bit systems.
+ val := (*uintptr)(unsafe.Pointer(uval))
+ for i := 0; i < count; i++ {
+ for {
+ v := *val
+ if CompareAndSwapUintptr(val, v, v+1) {
+ break
+ }
+ }
+ }
+}
+
+func TestHammer64(t *testing.T) {
+ const (
+ n = 100000
+ p = 4
+ )
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(p))
+
+ for _, tt := range hammer64 {
+ if tt.f == nil {
+ continue
+ }
+ c := make(chan int)
+ var val uint64
+ for i := 0; i < p; i++ {
+ go func() {
+ tt.f(&val, n)
+ c <- 1
+ }()
+ }
+ for i := 0; i < p; i++ {
+ <-c
+ }
+ if val != n*p {
+ t.Errorf("%s: val=%d want %d", tt.name, val, n*p)
+ }
+ }
+}
diff --git a/libgo/go/sync/atomic/doc.go b/libgo/go/sync/atomic/doc.go
new file mode 100644
index 0000000..1335def
--- /dev/null
+++ b/libgo/go/sync/atomic/doc.go
@@ -0,0 +1,57 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package atomic provides low-level atomic memory primitives
+// useful for implementing synchronization algorithms.
+//
+// These functions require great care to be used correctly.
+// Except for special, low-level applications, synchronization is better
+// done with channels or the facilities of the sync package.
+// Share memory by communicating;
+// don't communicate by sharing memory.
+//
+// The compare-and-swap operation, implemented by the CompareAndSwapT
+// functions, is the atomic equivalent of:
+//
+// if *val == old {
+// *val = new
+// return true
+// }
+// return false
+//
+package atomic
+
+// BUG(rsc): On ARM, the 64-bit functions use instructions unavailable before ARM 11.
+//
+// On x86-32, the 64-bit functions use instructions unavailable before the Pentium.
+
+// CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.
+func CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)
+
+// CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.
+func CompareAndSwapInt64(val *int64, old, new int64) (swapped bool)
+
+// CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.
+func CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool)
+
+// CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.
+func CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)
+
+// CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.
+func CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)
+
+// AddInt32 atomically adds delta to *val and returns the new value.
+func AddInt32(val *int32, delta int32) (new int32)
+
+// AddUint32 atomically adds delta to *val and returns the new value.
+func AddUint32(val *uint32, delta uint32) (new uint32)
+
+// AddInt64 atomically adds delta to *val and returns the new value.
+func AddInt64(val *int64, delta int64) (new int64)
+
+// AddUint64 atomically adds delta to *val and returns the new value.
+func AddUint64(val *uint64, delta uint64) (new uint64)
+
+// AddUintptr atomically adds delta to *val and returns the new value.
+func AddUintptr(val *uintptr, delta uintptr) (new uintptr)
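
Because a CompareAndSwap can fail whenever another goroutine wins the race, callers normally wrap it in a retry loop, as the hammer tests above do. A sketch of that pattern (atomicMax is an illustrative helper, not part of this patch; the plain load mirrors the hammer tests, since this revision has no atomic.Load functions):

    package example

    import "sync/atomic"

    // atomicMax raises *val to at least x using the CAS retry pattern.
    // If the swap fails, another goroutine changed *val between the
    // load and the CAS, so reload and try again.
    func atomicMax(val *int32, x int32) {
        for {
            old := *val
            if old >= x {
                return
            }
            if atomic.CompareAndSwapInt32(val, old, x) {
                return
            }
        }
    }
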
diff --git a/libgo/go/sync/cond.go b/libgo/go/sync/cond.go
new file mode 100644
index 0000000..ea48f2e
--- /dev/null
+++ b/libgo/go/sync/cond.go
@@ -0,0 +1,90 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import "runtime"
+
+// Cond implements a condition variable, a rendezvous point
+// for goroutines waiting for or announcing the occurrence
+// of an event.
+//
+// Each Cond has an associated Locker L (often a *Mutex or *RWMutex),
+// which must be held when changing the condition and
+// when calling the Wait method.
+type Cond struct {
+ L Locker // held while observing or changing the condition
+ m Mutex // held to avoid internal races
+ waiters int // number of goroutines blocked on Wait
+ sema *uint32
+}
+
+// NewCond returns a new Cond with Locker l.
+func NewCond(l Locker) *Cond {
+ return &Cond{L: l}
+}
+
+// Wait atomically unlocks c.L and suspends execution
+// of the calling goroutine. After later resuming execution,
+// Wait locks c.L before returning.
+//
+// Because L is not locked when Wait first resumes, the caller
+// typically cannot assume that the condition is true when
+// Wait returns. Instead, the caller should Wait in a loop:
+//
+// c.L.Lock()
+// for !condition() {
+// c.Wait()
+// }
+// ... make use of condition ...
+// c.L.Unlock()
+//
+func (c *Cond) Wait() {
+ c.m.Lock()
+ if c.sema == nil {
+ c.sema = new(uint32)
+ }
+ s := c.sema
+ c.waiters++
+ c.m.Unlock()
+ c.L.Unlock()
+ runtime.Semacquire(s)
+ c.L.Lock()
+}
+
+// Signal wakes one goroutine waiting on c, if there is any.
+//
+// It is allowed but not required for the caller to hold c.L
+// during the call.
+func (c *Cond) Signal() {
+ c.m.Lock()
+ if c.waiters > 0 {
+ c.waiters--
+ runtime.Semrelease(c.sema)
+ }
+ c.m.Unlock()
+}
+
+// Broadcast wakes all goroutines waiting on c.
+//
+// It is allowed but not required for the caller to hold c.L
+// during the call.
+func (c *Cond) Broadcast() {
+ c.m.Lock()
+ if c.waiters > 0 {
+ s := c.sema
+ n := c.waiters
+ for i := 0; i < n; i++ {
+ runtime.Semrelease(s)
+ }
+ // We just issued n wakeups via the semaphore s.
+ // To ensure that they wake up the existing waiters
+ // and not waiters that arrive after Broadcast returns,
+ // clear c.sema. The next operation will allocate
+ // a new one.
+ c.sema = nil
+ c.waiters = 0
+ }
+ c.m.Unlock()
+}
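
A sketch of the Wait-in-a-loop pattern from the doc comment, applied to a simple handoff queue (the queue and function names are illustrative, not from this patch):

    package example

    import "sync"

    var (
        mu    sync.Mutex
        cond  = sync.NewCond(&mu)
        queue []int
    )

    // produce appends an item under the lock, then wakes one waiter.
    func produce(v int) {
        mu.Lock()
        queue = append(queue, v)
        mu.Unlock()
        cond.Signal() // holding mu during Signal is allowed but not required
    }

    // consume blocks until an item is available. The loop is essential:
    // Wait can return even though another consumer already took the item.
    func consume() int {
        mu.Lock()
        for len(queue) == 0 {
            cond.Wait() // atomically unlocks mu, sleeps, relocks on wakeup
        }
        v := queue[0]
        queue = queue[1:]
        mu.Unlock()
        return v
    }
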
diff --git a/libgo/go/sync/cond_test.go b/libgo/go/sync/cond_test.go
new file mode 100644
index 0000000..846f98b
--- /dev/null
+++ b/libgo/go/sync/cond_test.go
@@ -0,0 +1,99 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package sync_test
+
+import (
+ . "sync"
+ "testing"
+)
+
+func TestCondSignal(t *testing.T) {
+ var m Mutex
+ c := NewCond(&m)
+ n := 2
+ running := make(chan bool, n)
+ awake := make(chan bool, n)
+ for i := 0; i < n; i++ {
+ go func() {
+ m.Lock()
+ running <- true
+ c.Wait()
+ awake <- true
+ m.Unlock()
+ }()
+ }
+ for i := 0; i < n; i++ {
+ <-running // Wait for everyone to run.
+ }
+ for n > 0 {
+ select {
+ case <-awake:
+ t.Fatal("goroutine not asleep")
+ default:
+ }
+ m.Lock()
+ c.Signal()
+ m.Unlock()
+ <-awake // Will deadlock if no goroutine wakes up
+ select {
+ case <-awake:
+ t.Fatal("too many goroutines awake")
+ default:
+ }
+ n--
+ }
+ c.Signal()
+}
+
+func TestCondBroadcast(t *testing.T) {
+ var m Mutex
+ c := NewCond(&m)
+ n := 200
+ running := make(chan int, n)
+ awake := make(chan int, n)
+ exit := false
+ for i := 0; i < n; i++ {
+ go func(g int) {
+ m.Lock()
+ for !exit {
+ running <- g
+ c.Wait()
+ awake <- g
+ }
+ m.Unlock()
+ }(i)
+ }
+ for i := 0; i < n; i++ {
+ for i := 0; i < n; i++ {
+ <-running // Will deadlock unless n are running.
+ }
+ if i == n-1 {
+ m.Lock()
+ exit = true
+ m.Unlock()
+ }
+ select {
+ case <-awake:
+ t.Fatal("goroutine not asleep")
+ default:
+ }
+ m.Lock()
+ c.Broadcast()
+ m.Unlock()
+ seen := make([]bool, n)
+ for i := 0; i < n; i++ {
+ g := <-awake
+ if seen[g] {
+ t.Fatal("goroutine woke up twice")
+ }
+ seen[g] = true
+ }
+ }
+ select {
+ case <-running:
+ t.Fatal("goroutine did not exit")
+ default:
+ }
+ c.Broadcast()
+}
diff --git a/libgo/go/sync/mutex.go b/libgo/go/sync/mutex.go
index 9a2bb2b..da565d3 100644
--- a/libgo/go/sync/mutex.go
+++ b/libgo/go/sync/mutex.go
@@ -3,43 +3,36 @@
// license that can be found in the LICENSE file.
// The sync package provides basic synchronization primitives
-// such as mutual exclusion locks. Other than the Once type,
-// most are intended for use by low-level library routines.
-// Higher-level synchronization is better done via channels
-// and communication.
+// such as mutual exclusion locks. Other than the Once and
+// WaitGroup types, most are intended for use by low-level
+// library routines. Higher-level synchronization is better
+// done via channels and communication.
package sync
-import "runtime"
-
-func cas(val *uint32, old, new uint32) bool
+import (
+ "runtime"
+ "sync/atomic"
+)
// A Mutex is a mutual exclusion lock.
// Mutexes can be created as part of other structures;
// the zero value for a Mutex is an unlocked mutex.
type Mutex struct {
- key uint32
+ key int32
sema uint32
}
-// Add delta to *val, and return the new *val in a thread-safe way. If multiple
-// goroutines call xadd on the same val concurrently, the changes will be
-// serialized, and all the deltas will be added in an undefined order.
-func xadd(val *uint32, delta int32) (new uint32) {
- for {
- v := *val
- nv := v + uint32(delta)
- if cas(val, v, nv) {
- return nv
- }
- }
- panic("unreached")
+// A Locker represents an object that can be locked and unlocked.
+type Locker interface {
+ Lock()
+ Unlock()
}
// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
- if xadd(&m.key, 1) == 1 {
+ if atomic.AddInt32(&m.key, 1) == 1 {
// changed from 0 to 1; we hold lock
return
}
@@ -53,9 +46,14 @@ func (m *Mutex) Lock() {
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
- if xadd(&m.key, -1) == 0 {
+ switch v := atomic.AddInt32(&m.key, -1); {
+ case v == 0:
// changed from 1 to 0; no contention
return
+ case v == -1:
+ // changed from 0 to -1: wasn't locked
+ // (or there are 4 billion goroutines waiting)
+ panic("sync: unlock of unlocked mutex")
}
runtime.Semrelease(&m.sema)
}
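
Besides replacing the private xadd helper with sync/atomic, this hunk introduces the Locker interface, which allows lock-agnostic helpers. A sketch (withLock is illustrative, not part of the patch):

    package main

    import "sync"

    // withLock runs f while holding l. Anything satisfying the new
    // Locker interface works: *Mutex, *RWMutex, or the value returned
    // by RWMutex.RLocker (added below).
    func withLock(l sync.Locker, f func()) {
        l.Lock()
        defer l.Unlock()
        f()
    }

    func main() {
        var mu sync.Mutex
        var rw sync.RWMutex
        withLock(&mu, func() { /* exclusive section */ })
        withLock(rw.RLocker(), func() { /* shared, read-only section */ })
    }
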
diff --git a/libgo/go/sync/mutex_test.go b/libgo/go/sync/mutex_test.go
index d0e048e..f5c20ca 100644
--- a/libgo/go/sync/mutex_test.go
+++ b/libgo/go/sync/mutex_test.go
@@ -89,3 +89,16 @@ func BenchmarkContendedMutex(b *testing.B) {
<-c
<-c
}
+
+func TestMutexPanic(t *testing.T) {
+ defer func() {
+ if recover() == nil {
+ t.Fatalf("unlock of unlocked mutex did not panic")
+ }
+ }()
+
+ var mu Mutex
+ mu.Lock()
+ mu.Unlock()
+ mu.Unlock()
+}
diff --git a/libgo/go/sync/once.go b/libgo/go/sync/once.go
index 8c877cd..b6f5f5a 100644
--- a/libgo/go/sync/once.go
+++ b/libgo/go/sync/once.go
@@ -13,7 +13,7 @@ type Once struct {
// Do calls the function f if and only if the method is being called for the
// first time with this receiver. In other words, given
// var once Once
-// if Do(f) is called multiple times, only the first call will invoke f,
+// if once.Do(f) is called multiple times, only the first call will invoke f,
// even if f has a different value in each invocation. A new instance of
// Once is required for each function to execute.
//
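
The corrected sentence describes the usual idiom: a package-level Once guarding one-time initialization. A minimal sketch (names illustrative):

    package config

    import "sync"

    var (
        once     sync.Once
        settings map[string]string
    )

    // Load builds the settings map exactly once, no matter how many
    // goroutines call it; every later call returns the cached map.
    func Load() map[string]string {
        once.Do(func() {
            settings = map[string]string{"mode": "default"}
        })
        return settings
    }
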
diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go
index 06fd0b0..9248b4b 100644
--- a/libgo/go/sync/rwmutex.go
+++ b/libgo/go/sync/rwmutex.go
@@ -4,6 +4,8 @@
package sync
+import "sync/atomic"
+
// An RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers
// or a single writer.
@@ -14,9 +16,9 @@ package sync
// Writers take priority over Readers: no new RLocks
// are granted while a blocked Lock call is waiting.
type RWMutex struct {
- w Mutex // held if there are pending readers or writers
- r Mutex // held if the w is being rd
- readerCount uint32 // number of pending readers
+ w Mutex // held if there are pending readers or writers
+ r Mutex // held if the w is being rd
+ readerCount int32 // number of pending readers
}
// RLock locks rw for reading.
@@ -33,7 +35,7 @@ func (rw *RWMutex) RLock() {
// B: rw.RUnlock()
// ... (new readers come and go indefinitely, W is starving)
rw.r.Lock()
- if xadd(&rw.readerCount, 1) == 1 {
+ if atomic.AddInt32(&rw.readerCount, 1) == 1 {
// The first reader locks rw.w, so writers will be blocked
// while the readers have the RLock.
rw.w.Lock()
@@ -46,7 +48,7 @@ func (rw *RWMutex) RLock() {
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
- if xadd(&rw.readerCount, -1) == 0 {
+ if atomic.AddInt32(&rw.readerCount, -1) == 0 {
// last reader finished, enable writers
rw.w.Unlock()
}
@@ -64,12 +66,21 @@ func (rw *RWMutex) Lock() {
rw.r.Unlock()
}
-// Unlock unlocks rw for writing.
-// It is a run-time error if rw is not locked for writing
-// on entry to Unlock.
+// Unlock unlocks rw for writing. It is a run-time error if rw is
+// not locked for writing on entry to Unlock.
//
-// Like for Mutexes,
-// a locked RWMutex is not associated with a particular goroutine.
-// It is allowed for one goroutine to RLock (Lock) an RWMutex and then
+// As with Mutexes, a locked RWMutex is not associated with a particular
+// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() { rw.w.Unlock() }
+
+// RLocker returns a Locker interface that implements
+// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
+func (rw *RWMutex) RLocker() Locker {
+ return (*rlocker)(rw)
+}
+
+type rlocker RWMutex
+
+func (r *rlocker) Lock() { (*RWMutex)(r).RLock() }
+func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
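
RLocker adapts an RWMutex to the new Locker interface by delegating Lock/Unlock to RLock/RUnlock. Its classic use is with Cond, so that waiters hold only the read side of the lock. A sketch (the ready flag and function names are illustrative):

    package example

    import "sync"

    var (
        rw    sync.RWMutex
        ready bool
        cond  = sync.NewCond(rw.RLocker())
    )

    // waitReady blocks until ready is set while holding only the read
    // lock, so any number of waiters can block side by side.
    func waitReady() {
        rw.RLock()
        for !ready {
            cond.Wait() // drops the read lock while asleep, retakes it on wakeup
        }
        rw.RUnlock()
    }

    // setReady flips the flag under the write lock, then wakes all waiters.
    func setReady() {
        rw.Lock()
        ready = true
        rw.Unlock()
        cond.Broadcast()
    }
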
diff --git a/libgo/go/sync/rwmutex_test.go b/libgo/go/sync/rwmutex_test.go
index 111bca1..4050792 100644
--- a/libgo/go/sync/rwmutex_test.go
+++ b/libgo/go/sync/rwmutex_test.go
@@ -10,6 +10,7 @@ import (
"fmt"
"runtime"
. "sync"
+ "sync/atomic"
"testing"
)
@@ -49,31 +50,31 @@ func TestParallelReaders(t *testing.T) {
doTestParallelReaders(4, 2)
}
-func reader(rwm *RWMutex, num_iterations int, activity *uint32, cdone chan bool) {
+func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
for i := 0; i < num_iterations; i++ {
rwm.RLock()
- n := Xadd(activity, 1)
+ n := atomic.AddInt32(activity, 1)
if n < 1 || n >= 10000 {
panic(fmt.Sprintf("wlock(%d)\n", n))
}
for i := 0; i < 100; i++ {
}
- Xadd(activity, -1)
+ atomic.AddInt32(activity, -1)
rwm.RUnlock()
}
cdone <- true
}
-func writer(rwm *RWMutex, num_iterations int, activity *uint32, cdone chan bool) {
+func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
for i := 0; i < num_iterations; i++ {
rwm.Lock()
- n := Xadd(activity, 10000)
+ n := atomic.AddInt32(activity, 10000)
if n != 10000 {
panic(fmt.Sprintf("wlock(%d)\n", n))
}
for i := 0; i < 100; i++ {
}
- Xadd(activity, -10000)
+ atomic.AddInt32(activity, -10000)
rwm.Unlock()
}
cdone <- true
@@ -82,7 +83,7 @@ func writer(rwm *RWMutex, num_iterations int, activity *uint32, cdone chan bool)
func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
runtime.GOMAXPROCS(gomaxprocs)
// Number of active readers + 10000 * number of active writers.
- var activity uint32
+ var activity int32
var rwm RWMutex
cdone := make(chan bool)
go writer(&rwm, num_iterations, &activity, cdone)
@@ -112,3 +113,38 @@ func TestRWMutex(t *testing.T) {
HammerRWMutex(10, 10, 1000)
HammerRWMutex(10, 5, 10000)
}
+
+func TestRLocker(t *testing.T) {
+ var wl RWMutex
+ var rl Locker
+ wlocked := make(chan bool, 1)
+ rlocked := make(chan bool, 1)
+ rl = wl.RLocker()
+ n := 10
+ go func() {
+ for i := 0; i < n; i++ {
+ rl.Lock()
+ rl.Lock()
+ rlocked <- true
+ wl.Lock()
+ wlocked <- true
+ }
+ }()
+ for i := 0; i < n; i++ {
+ <-rlocked
+ rl.Unlock()
+ select {
+ case <-wlocked:
+ t.Fatal("RLocker() didn't read-lock it")
+ default:
+ }
+ rl.Unlock()
+ <-wlocked
+ select {
+ case <-rlocked:
+ t.Fatal("RLocker() didn't respect the write lock")
+ default:
+ }
+ wl.Unlock()
+ }
+}
diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go
new file mode 100644
index 0000000..68e1d50
--- /dev/null
+++ b/libgo/go/sync/waitgroup.go
@@ -0,0 +1,86 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import "runtime"
+
+// A WaitGroup waits for a collection of goroutines to finish.
+// The main goroutine calls Add to set the number of
+// goroutines to wait for. Then each of the goroutines
+// runs and calls Done when finished. At the same time,
+// Wait can be used to block until all goroutines have finished.
+//
+// For example:
+//
+// for i := 0; i < n; i++ {
+// if !condition(i) {
+// continue
+// }
+// wg.Add(1)
+// go func() {
+// // Do something.
+// wg.Done()
+// }()
+// }
+// wg.Wait()
+//
+type WaitGroup struct {
+ m Mutex
+ counter int
+ waiters int
+ sema *uint32
+}
+
+// WaitGroup creates a new semaphore each time the old semaphore
+// is released. This is to avoid the following race:
+//
+// G1: Add(1)
+// G1: go G2()
+// G1: Wait() // Context switch after Unlock() and before Semacquire().
+// G2: Done() // Release semaphore: sema == 1, waiters == 0. G1 doesn't run yet.
+// G3: Wait() // Finds counter == 0, waiters == 0, doesn't block.
+// G3: Add(1) // Makes counter == 1, waiters == 0.
+// G3: go G4()
+// G3: Wait() // G1 still hasn't run, G3 finds sema == 1, unblocked! Bug.
+
+// Add adds delta, which may be negative, to the WaitGroup counter.
+// If the counter becomes zero, all goroutines blocked on Wait() are released.
+func (wg *WaitGroup) Add(delta int) {
+ wg.m.Lock()
+ if delta < -wg.counter {
+ wg.m.Unlock()
+ panic("sync: negative WaitGroup count")
+ }
+ wg.counter += delta
+ if wg.counter == 0 && wg.waiters > 0 {
+ for i := 0; i < wg.waiters; i++ {
+ runtime.Semrelease(wg.sema)
+ }
+ wg.waiters = 0
+ wg.sema = nil
+ }
+ wg.m.Unlock()
+}
+
+// Done decrements the WaitGroup counter.
+func (wg *WaitGroup) Done() {
+ wg.Add(-1)
+}
+
+// Wait blocks until the WaitGroup counter is zero.
+func (wg *WaitGroup) Wait() {
+ wg.m.Lock()
+ if wg.counter == 0 {
+ wg.m.Unlock()
+ return
+ }
+ wg.waiters++
+ if wg.sema == nil {
+ wg.sema = new(uint32)
+ }
+ s := wg.sema
+ wg.m.Unlock()
+ runtime.Semacquire(s)
+}
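
A self-contained version of the doc-comment example above, with a trivial stand-in for condition(i) (illustrative, not from the patch):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 8; i++ {
            if i%2 != 0 { // stands in for the doc comment's condition(i)
                continue
            }
            wg.Add(1) // count the goroutine before it starts
            go func(i int) {
                fmt.Println("worker", i)
                wg.Done() // report completion
            }(i)
        }
        wg.Wait() // returns only after every Add is matched by a Done
    }
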
diff --git a/libgo/go/sync/waitgroup_test.go b/libgo/go/sync/waitgroup_test.go
new file mode 100644
index 0000000..fe35732
--- /dev/null
+++ b/libgo/go/sync/waitgroup_test.go
@@ -0,0 +1,60 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync_test
+
+import (
+ . "sync"
+ "testing"
+)
+
+func testWaitGroup(t *testing.T, wg1 *WaitGroup, wg2 *WaitGroup) {
+ n := 16
+ wg1.Add(n)
+ wg2.Add(n)
+ exited := make(chan bool, n)
+ for i := 0; i != n; i++ {
+ go func(i int) {
+ wg1.Done()
+ wg2.Wait()
+ exited <- true
+ }(i)
+ }
+ wg1.Wait()
+ for i := 0; i != n; i++ {
+ select {
+ case <-exited:
+ t.Fatal("WaitGroup released group too soon")
+ default:
+ }
+ wg2.Done()
+ }
+ for i := 0; i != n; i++ {
+ <-exited // Will block if barrier fails to unlock someone.
+ }
+}
+
+func TestWaitGroup(t *testing.T) {
+ wg1 := &WaitGroup{}
+ wg2 := &WaitGroup{}
+
+ // Run the same test a few times to ensure barrier is in a proper state.
+ for i := 0; i != 8; i++ {
+ testWaitGroup(t, wg1, wg2)
+ }
+}
+
+func TestWaitGroupMisuse(t *testing.T) {
+ defer func() {
+ err := recover()
+ if err != "sync: negative WaitGroup count" {
+ t.Fatalf("Unexpected panic: %#v", err)
+ }
+ }()
+ wg := &WaitGroup{}
+ wg.Add(1)
+ wg.Done()
+ wg.Done()
+ t.Fatal("Should panic")
+}
diff --git a/libgo/go/sync/xadd_test.go b/libgo/go/sync/xadd_test.go
deleted file mode 100644
index 8b2ef76..0000000
--- a/libgo/go/sync/xadd_test.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-func Xadd(val *uint32, delta int32) (new uint32) {
- return xadd(val, delta)
-}
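
This deleted file existed only to re-export the private xadd for tests; with xadd gone, callers move to sync/atomic directly. The migration is mechanical, sketched here (decrement is an illustrative helper, not from the patch):

    package example

    import "sync/atomic"

    // decrement shows the replacement for the old xadd(&count, -1):
    // the counter switches from uint32 to int32, which is what lets
    // the new Mutex.Unlock detect underflow and panic.
    func decrement(count *int32) int32 {
        return atomic.AddInt32(count, -1)
    }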