path: root/libgo/go/sync
author    Ian Lance Taylor <ian@gcc.gnu.org>    2016-07-22 18:15:38 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>    2016-07-22 18:15:38 +0000
commit    22b955cca564a9a3a5b8c9d9dd1e295b7943c128 (patch)
tree      abdbd898676e1f853fca2d7e031d105d7ebcf676 /libgo/go/sync
parent    9d04a3af4c6491536badf6bde9707c907e4d196b (diff)
download  gcc-22b955cca564a9a3a5b8c9d9dd1e295b7943c128.zip
          gcc-22b955cca564a9a3a5b8c9d9dd1e295b7943c128.tar.gz
          gcc-22b955cca564a9a3a5b8c9d9dd1e295b7943c128.tar.bz2
libgo: update to go1.7rc3
Reviewed-on: https://go-review.googlesource.com/25150
From-SVN: r238662
Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/atomic/64bit_arm.go    |  2
-rw-r--r--  libgo/go/sync/atomic/atomic_test.go  |  4
-rw-r--r--  libgo/go/sync/atomic/doc.go          |  2
-rw-r--r--  libgo/go/sync/atomic/value.go        | 14
-rw-r--r--  libgo/go/sync/atomic/value_test.go   |  7
-rw-r--r--  libgo/go/sync/cond.go                | 63
-rw-r--r--  libgo/go/sync/cond_test.go           | 59
-rw-r--r--  libgo/go/sync/export_test.go         |  2
-rw-r--r--  libgo/go/sync/mutex.go               |  6
-rw-r--r--  libgo/go/sync/once.go                |  4
-rw-r--r--  libgo/go/sync/pool.go                |  9
-rw-r--r--  libgo/go/sync/runtime.go             | 37
-rw-r--r--  libgo/go/sync/runtime_sema_test.go   |  3
-rw-r--r--  libgo/go/sync/rwmutex.go             | 23
-rw-r--r--  libgo/go/sync/waitgroup.go           |  8
15 files changed, 162 insertions, 81 deletions
diff --git a/libgo/go/sync/atomic/64bit_arm.go b/libgo/go/sync/atomic/64bit_arm.go
index b98e608..4ef1174 100644
--- a/libgo/go/sync/atomic/64bit_arm.go
+++ b/libgo/go/sync/atomic/64bit_arm.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors.  All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/sync/atomic/atomic_test.go b/libgo/go/sync/atomic/atomic_test.go
index 6dae0fd..5a33d7f 100644
--- a/libgo/go/sync/atomic/atomic_test.go
+++ b/libgo/go/sync/atomic/atomic_test.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -747,7 +747,7 @@ func TestStorePointer(t *testing.T) {
// (Is the function atomic?)
//
// For each function, we write a "hammer" function that repeatedly
-// uses the atomic operation to add 1 to a value.  After running
+// uses the atomic operation to add 1 to a value. After running
// multiple hammers in parallel, check that we end with the correct
// total.
// Swap can't add 1, so it uses a different scheme.
diff --git a/libgo/go/sync/atomic/doc.go b/libgo/go/sync/atomic/doc.go
index 10fb8c9..302ff43 100644
--- a/libgo/go/sync/atomic/doc.go
+++ b/libgo/go/sync/atomic/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/sync/atomic/value.go b/libgo/go/sync/atomic/value.go
index ab3aa112..30abf72 100644
--- a/libgo/go/sync/atomic/value.go
+++ b/libgo/go/sync/atomic/value.go
@@ -12,7 +12,11 @@ import (
// Values can be created as part of other data structures.
// The zero value for a Value returns nil from Load.
// Once Store has been called, a Value must not be copied.
+//
+// A Value must not be copied after first use.
type Value struct {
+ noCopy noCopy
+
v interface{}
}
@@ -83,3 +87,13 @@ func (v *Value) Store(x interface{}) {
// Disable/enable preemption, implemented in runtime.
func runtime_procPin()
func runtime_procUnpin()
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
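
Illustrative sketch, not from this commit: the doc comment above says a zero Value returns nil from Load and must not be copied once Store has been called. A minimal use of the Go 1.7-era interface{}-based API looks like this (the config type is made up for the example):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type config struct{ endpoint string }

    func main() {
        var cur atomic.Value // zero Value: Load returns nil until Store is called

        cur.Store(config{endpoint: "a.example"}) // after this, cur must not be copied

        // Readers get a consistent snapshot without extra locking.
        c := cur.Load().(config)
        fmt.Println(c.endpoint)
    }

Copying cur after the Store is exactly what the new noCopy field is meant to let the -copylocks checker in go vet report.
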
diff --git a/libgo/go/sync/atomic/value_test.go b/libgo/go/sync/atomic/value_test.go
index 382dc68..fd90451 100644
--- a/libgo/go/sync/atomic/value_test.go
+++ b/libgo/go/sync/atomic/value_test.go
@@ -86,6 +86,11 @@ func TestValueConcurrent(t *testing.T) {
{complex(0, 0), complex(1, 2), complex(3, 4), complex(5, 6)},
}
p := 4 * runtime.GOMAXPROCS(0)
+ N := int(1e5)
+ if testing.Short() {
+ p /= 2
+ N = 1e3
+ }
for _, test := range tests {
var v Value
done := make(chan bool)
@@ -93,7 +98,7 @@ func TestValueConcurrent(t *testing.T) {
go func() {
r := rand.New(rand.NewSource(rand.Int63()))
loop:
- for j := 0; j < 1e5; j++ {
+ for j := 0; j < N; j++ {
x := test[r.Intn(len(test))]
v.Store(x)
x = v.Load()
diff --git a/libgo/go/sync/cond.go b/libgo/go/sync/cond.go
index 0aefcda..c070d9d 100644
--- a/libgo/go/sync/cond.go
+++ b/libgo/go/sync/cond.go
@@ -5,7 +5,6 @@
package sync
import (
- "internal/race"
"sync/atomic"
"unsafe"
)
@@ -21,11 +20,12 @@ import (
// A Cond can be created as part of other structures.
// A Cond must not be copied after first use.
type Cond struct {
+ noCopy noCopy
+
// L is held while observing or changing the condition
L Locker
- sema syncSema
- waiters uint32 // number of waiters
+ notify notifyList
checker copyChecker
}
@@ -35,13 +35,13 @@ func NewCond(l Locker) *Cond {
}
// Wait atomically unlocks c.L and suspends execution
-// of the calling goroutine.  After later resuming execution,
-// Wait locks c.L before returning.  Unlike in other systems,
+// of the calling goroutine. After later resuming execution,
+// Wait locks c.L before returning. Unlike in other systems,
// Wait cannot return unless awoken by Broadcast or Signal.
//
// Because c.L is not locked when Wait first resumes, the caller
// typically cannot assume that the condition is true when
-// Wait returns.  Instead, the caller should Wait in a loop:
+// Wait returns. Instead, the caller should Wait in a loop:
//
// c.L.Lock()
// for !condition() {
@@ -52,15 +52,9 @@ func NewCond(l Locker) *Cond {
//
func (c *Cond) Wait() {
c.checker.check()
- if race.Enabled {
- race.Disable()
- }
- atomic.AddUint32(&c.waiters, 1)
- if race.Enabled {
- race.Enable()
- }
+ t := runtime_notifyListAdd(&c.notify)
c.L.Unlock()
- runtime_Syncsemacquire(&c.sema)
+ runtime_notifyListWait(&c.notify, t)
c.L.Lock()
}
@@ -69,7 +63,8 @@ func (c *Cond) Wait() {
// It is allowed but not required for the caller to hold c.L
// during the call.
func (c *Cond) Signal() {
- c.signalImpl(false)
+ c.checker.check()
+ runtime_notifyListNotifyOne(&c.notify)
}
// Broadcast wakes all goroutines waiting on c.
@@ -77,34 +72,8 @@ func (c *Cond) Signal() {
// It is allowed but not required for the caller to hold c.L
// during the call.
func (c *Cond) Broadcast() {
- c.signalImpl(true)
-}
-
-func (c *Cond) signalImpl(all bool) {
c.checker.check()
- if race.Enabled {
- race.Disable()
- }
- for {
- old := atomic.LoadUint32(&c.waiters)
- if old == 0 {
- if race.Enabled {
- race.Enable()
- }
- return
- }
- new := old - 1
- if all {
- new = 0
- }
- if atomic.CompareAndSwapUint32(&c.waiters, old, new) {
- if race.Enabled {
- race.Enable()
- }
- runtime_Syncsemrelease(&c.sema, old-new)
- return
- }
- }
+ runtime_notifyListNotifyAll(&c.notify)
}
// copyChecker holds back pointer to itself to detect object copying.
@@ -117,3 +86,13 @@ func (c *copyChecker) check() {
panic("sync.Cond is copied")
}
}
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
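
Illustrative sketch, not from this commit: the rewritten Wait keeps the usage pattern documented above — lock c.L, loop on the condition, Wait inside the loop, then use the condition while still holding the lock:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var mu sync.Mutex
        cond := sync.NewCond(&mu)
        var queue []int

        go func() {
            mu.Lock()
            queue = append(queue, 42)
            mu.Unlock()
            cond.Signal() // wake one waiter; holding mu here is allowed but not required
        }()

        mu.Lock()
        for len(queue) == 0 { // re-check the condition after every wakeup
            cond.Wait() // unlocks mu, sleeps, and re-locks mu before returning
        }
        fmt.Println(queue[0])
        mu.Unlock()
    }
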
diff --git a/libgo/go/sync/cond_test.go b/libgo/go/sync/cond_test.go
index 467c806..7b07295 100644
--- a/libgo/go/sync/cond_test.go
+++ b/libgo/go/sync/cond_test.go
@@ -8,6 +8,7 @@ import (
"runtime"
"testing"
+ "time"
)
func TestCondSignal(t *testing.T) {
@@ -183,6 +184,64 @@ func TestRace(t *testing.T) {
<-done
}
+func TestCondSignalStealing(t *testing.T) {
+ for iters := 0; iters < 1000; iters++ {
+ var m Mutex
+ cond := NewCond(&m)
+
+ // Start a waiter.
+ ch := make(chan struct{})
+ go func() {
+ m.Lock()
+ ch <- struct{}{}
+ cond.Wait()
+ m.Unlock()
+
+ ch <- struct{}{}
+ }()
+
+ <-ch
+ m.Lock()
+ m.Unlock()
+
+ // We know that the waiter is in the cond.Wait() call because we
+ // synchronized with it, then acquired/released the mutex it was
+ // holding when we synchronized.
+ //
+ // Start two goroutines that will race: one will broadcast on
+ // the cond var, the other will wait on it.
+ //
+ // The new waiter may or may not get notified, but the first one
+ // has to be notified.
+ done := false
+ go func() {
+ cond.Broadcast()
+ }()
+
+ go func() {
+ m.Lock()
+ for !done {
+ cond.Wait()
+ }
+ m.Unlock()
+ }()
+
+ // Check that the first waiter does get signaled.
+ select {
+ case <-ch:
+ case <-time.After(2 * time.Second):
+ t.Fatalf("First waiter didn't get broadcast.")
+ }
+
+ // Release the second waiter in case it didn't get the
+ // broadcast.
+ m.Lock()
+ done = true
+ m.Unlock()
+ cond.Broadcast()
+ }
+}
+
func TestCondCopy(t *testing.T) {
defer func() {
err := recover()
diff --git a/libgo/go/sync/export_test.go b/libgo/go/sync/export_test.go
index fa5983a..6ed38da 100644
--- a/libgo/go/sync/export_test.go
+++ b/libgo/go/sync/export_test.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors.  All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/libgo/go/sync/mutex.go b/libgo/go/sync/mutex.go
index eb52614..9089279 100644
--- a/libgo/go/sync/mutex.go
+++ b/libgo/go/sync/mutex.go
@@ -3,8 +3,8 @@
// license that can be found in the LICENSE file.
// Package sync provides basic synchronization primitives such as mutual
-// exclusion locks.  Other than the Once and WaitGroup types, most are intended
-// for use by low-level library routines.  Higher-level synchronization is
+// exclusion locks. Other than the Once and WaitGroup types, most are intended
+// for use by low-level library routines. Higher-level synchronization is
// better done via channels and communication.
//
// Values containing the types defined in this package should not be copied.
@@ -19,6 +19,8 @@ import (
// A Mutex is a mutual exclusion lock.
// Mutexes can be created as part of other structures;
// the zero value for a Mutex is an unlocked mutex.
+//
+// A Mutex must not be copied after first use.
type Mutex struct {
state int32
sema uint32
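
Illustrative sketch, not from this commit: the easiest way to honor the new "must not be copied after first use" note is to keep the Mutex inside a struct that is only handled through pointers:

    package main

    import (
        "fmt"
        "sync"
    )

    type counter struct {
        mu sync.Mutex // must not be copied after first use
        n  int
    }

    // inc uses a pointer receiver, so the Mutex itself is never copied.
    func (c *counter) inc() {
        c.mu.Lock()
        c.n++
        c.mu.Unlock()
    }

    func main() {
        c := &counter{}
        c.inc()
        fmt.Println(c.n) // 1
    }
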
diff --git a/libgo/go/sync/once.go b/libgo/go/sync/once.go
index 10b42fd..d8ef952 100644
--- a/libgo/go/sync/once.go
+++ b/libgo/go/sync/once.go
@@ -18,10 +18,10 @@ type Once struct {
// first time for this instance of Once. In other words, given
// var once Once
// if once.Do(f) is called multiple times, only the first call will invoke f,
-// even if f has a different value in each invocation.  A new instance of
+// even if f has a different value in each invocation. A new instance of
// Once is required for each function to execute.
//
-// Do is intended for initialization that must be run exactly once.  Since f
+// Do is intended for initialization that must be run exactly once. Since f
// is niladic, it may be necessary to use a function literal to capture the
// arguments to a function to be invoked by Do:
// config.once.Do(func() { config.init(filename) })
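
Illustrative sketch, not from this commit: because Do is niladic, the config.once.Do(func() { config.init(filename) }) pattern in the comment above captures the argument through a function literal. The Config type and its init and Get methods are invented for the example:

    package main

    import (
        "fmt"
        "sync"
    )

    type Config struct {
        once     sync.Once
        filename string
        data     string
    }

    func (c *Config) init(filename string) { c.data = "loaded from " + filename }

    func (c *Config) Get() string {
        // Do takes no arguments, so a function literal captures filename.
        c.once.Do(func() { c.init(c.filename) })
        return c.data
    }

    func main() {
        c := &Config{filename: "app.conf"}
        fmt.Println(c.Get())
        fmt.Println(c.Get()) // init ran only once; the same data is returned
    }
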
diff --git a/libgo/go/sync/pool.go b/libgo/go/sync/pool.go
index 381af0b..bf29d88 100644
--- a/libgo/go/sync/pool.go
+++ b/libgo/go/sync/pool.go
@@ -40,7 +40,10 @@ import (
// that scenario. It is more efficient to have such objects implement their own
// free list.
//
+// A Pool must not be copied after first use.
type Pool struct {
+ noCopy noCopy
+
local unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
localSize uintptr // size of the local array
@@ -149,7 +152,7 @@ func (p *Pool) getSlow() (x interface{}) {
func (p *Pool) pin() *poolLocal {
pid := runtime_procPin()
// In pinSlow we store to localSize and then to local, here we load in opposite order.
- // Since we've disabled preemption, GC can not happen in between.
+ // Since we've disabled preemption, GC cannot happen in between.
// Thus here we must observe local at least as large localSize.
// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
s := atomic.LoadUintptr(&p.localSize) // load-acquire
@@ -179,8 +182,8 @@ func (p *Pool) pinSlow() *poolLocal {
// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
size := runtime.GOMAXPROCS(0)
local := make([]poolLocal, size)
- atomic.StorePointer((*unsafe.Pointer)(&p.local), unsafe.Pointer(&local[0])) // store-release
- atomic.StoreUintptr(&p.localSize, uintptr(size)) // store-release
+ atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
+ atomic.StoreUintptr(&p.localSize, uintptr(size)) // store-release
return &local[pid]
}
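
Illustrative sketch, not from this commit: a typical caller keeps the Pool in a package-level variable (so it is never copied after first use, per the note above) and moves only pointers through Get and Put:

    package main

    import (
        "bytes"
        "fmt"
        "sync"
    )

    var bufPool = sync.Pool{
        // New is invoked when Get finds the pool empty.
        New: func() interface{} { return new(bytes.Buffer) },
    }

    func main() {
        b := bufPool.Get().(*bytes.Buffer)
        b.Reset()
        b.WriteString("hello")
        fmt.Println(b.String())
        bufPool.Put(b) // hand the buffer back; the runtime may still drop it at a GC
    }
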
diff --git a/libgo/go/sync/runtime.go b/libgo/go/sync/runtime.go
index c66d2de..96c56c8 100644
--- a/libgo/go/sync/runtime.go
+++ b/libgo/go/sync/runtime.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors.  All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -19,24 +19,33 @@ func runtime_Semacquire(s *uint32)
// library and should not be used directly.
func runtime_Semrelease(s *uint32)
-// Approximation of syncSema in runtime/sema.go.
-type syncSema struct {
- lock uintptr
- head unsafe.Pointer
- tail unsafe.Pointer
+// Approximation of notifyList in runtime/sema.go. Size and alignment must
+// agree.
+type notifyList struct {
+ wait uint32
+ notify uint32
+ lock uintptr
+ head unsafe.Pointer
+ tail unsafe.Pointer
}
-// Syncsemacquire waits for a pairing Syncsemrelease on the same semaphore s.
-func runtime_Syncsemacquire(s *syncSema)
+// See runtime/sema.go for documentation.
+func runtime_notifyListAdd(l *notifyList) uint32
-// Syncsemrelease waits for n pairing Syncsemacquire on the same semaphore s.
-func runtime_Syncsemrelease(s *syncSema, n uint32)
+// See runtime/sema.go for documentation.
+func runtime_notifyListWait(l *notifyList, t uint32)
-// Ensure that sync and runtime agree on size of syncSema.
-func runtime_Syncsemcheck(size uintptr)
+// See runtime/sema.go for documentation.
+func runtime_notifyListNotifyAll(l *notifyList)
+
+// See runtime/sema.go for documentation.
+func runtime_notifyListNotifyOne(l *notifyList)
+
+// Ensure that sync and runtime agree on size of notifyList.
+func runtime_notifyListCheck(size uintptr)
func init() {
- var s syncSema
- runtime_Syncsemcheck(unsafe.Sizeof(s))
+ var n notifyList
+ runtime_notifyListCheck(unsafe.Sizeof(n))
}
// Active spinning runtime support.
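
Illustrative sketch, not from this commit: the init function above guards the size agreement between sync's notifyList and the runtime's definition with unsafe.Sizeof. The same layout-check idea, reproduced standalone (notifyListMirror is a hypothetical stand-in):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // notifyListMirror plays the role of sync's local copy of runtime's notifyList.
    type notifyListMirror struct {
        wait   uint32
        notify uint32
        lock   uintptr
        head   unsafe.Pointer
        tail   unsafe.Pointer
    }

    func main() {
        var n notifyListMirror
        // The real code hands this size to runtime_notifyListCheck, which
        // aborts startup if the two definitions have drifted apart.
        fmt.Println("size passed to the check:", unsafe.Sizeof(n))
    }
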
diff --git a/libgo/go/sync/runtime_sema_test.go b/libgo/go/sync/runtime_sema_test.go
index 5b7dd3d..a2382f4 100644
--- a/libgo/go/sync/runtime_sema_test.go
+++ b/libgo/go/sync/runtime_sema_test.go
@@ -25,6 +25,9 @@ func BenchmarkSemaUncontended(b *testing.B) {
}
func benchmarkSema(b *testing.B, block, work bool) {
+ if b.N == 0 {
+ return
+ }
sem := uint32(0)
if block {
done := make(chan bool)
diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go
index d438c93..6734360 100644
--- a/libgo/go/sync/rwmutex.go
+++ b/libgo/go/sync/rwmutex.go
@@ -11,11 +11,17 @@ import (
)
// An RWMutex is a reader/writer mutual exclusion lock.
-// The lock can be held by an arbitrary number of readers
-// or a single writer.
-// RWMutexes can be created as part of other
-// structures; the zero value for a RWMutex is
-// an unlocked mutex.
+// The lock can be held by an arbitrary number of readers or a single writer.
+// RWMutexes can be created as part of other structures;
+// the zero value for a RWMutex is an unlocked mutex.
+//
+// An RWMutex must not be copied after first use.
+//
+// If a goroutine holds a RWMutex for reading, it must not expect this or any
+// other goroutine to be able to also take the read lock until the first read
+// lock is released. In particular, this prohibits recursive read locking.
+// This is to ensure that the lock eventually becomes available;
+// a blocked Lock call excludes new readers from acquiring the lock.
type RWMutex struct {
w Mutex // held if there are pending writers
writerSem uint32 // semaphore for writers to wait for completing readers
@@ -71,9 +77,6 @@ func (rw *RWMutex) RUnlock() {
// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
-// To ensure that the lock eventually becomes available,
-// a blocked Lock call excludes new readers from acquiring
-// the lock.
func (rw *RWMutex) Lock() {
if race.Enabled {
_ = rw.w.state
@@ -94,11 +97,11 @@ func (rw *RWMutex) Lock() {
}
}
-// Unlock unlocks rw for writing.  It is a run-time error if rw is
+// Unlock unlocks rw for writing. It is a run-time error if rw is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular
-// goroutine.  One goroutine may RLock (Lock) an RWMutex and then
+// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
if race.Enabled {
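
Illustrative sketch, not from this commit: the new doc comment forbids recursive read locking because a blocked Lock excludes new readers. The hypothetical sequence below shows the hazard; the recursive RLock is left commented out so the sketch still runs:

    package main

    import (
        "sync"
        "time"
    )

    func main() {
        var rw sync.RWMutex

        rw.RLock() // first read lock, held by this goroutine
        go func() {
            rw.Lock() // writer queues up; from now on new readers are excluded
            rw.Unlock()
        }()
        time.Sleep(10 * time.Millisecond) // give the writer time to block

        // rw.RLock() // a recursive read lock here could wait behind the writer forever
        rw.RUnlock() // releasing the first read lock lets the writer proceed
    }
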
diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go
index c77fec3..b386e1f 100644
--- a/libgo/go/sync/waitgroup.go
+++ b/libgo/go/sync/waitgroup.go
@@ -12,10 +12,14 @@ import (
// A WaitGroup waits for a collection of goroutines to finish.
// The main goroutine calls Add to set the number of
-// goroutines to wait for.  Then each of the goroutines
-// runs and calls Done when finished.  At the same time,
+// goroutines to wait for. Then each of the goroutines
+// runs and calls Done when finished. At the same time,
// Wait can be used to block until all goroutines have finished.
+//
+// A WaitGroup must not be copied after first use.
type WaitGroup struct {
+ noCopy noCopy
+
// 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
// 64-bit atomic operations require 64-bit alignment, but 32-bit
// compilers do not ensure it. So we allocate 12 bytes and then use
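
Illustrative sketch, not from this commit: the Add/Done/Wait flow described above, with the WaitGroup kept in one place (never copied), as the new comment requires:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1) // count the goroutine before it starts
            go func(n int) {
                defer wg.Done() // signal completion exactly once
                fmt.Println("worker", n)
            }(i)
        }
        wg.Wait() // block until every Done has run
    }
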