author    Ian Lance Taylor <iant@google.com>    2016-02-03 21:58:02 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>    2016-02-03 21:58:02 +0000
commit    f98dd1a338867a408f7c72d73fbad7fe7fc93e3a (patch)
tree      2f8da9862a9c1fe0df138917f997b03439c02773 /libgo/go/sync
parent    b081ed4efc144da0c45a6484aebfd10e0eb9fda3 (diff)
libgo: Update to go1.6rc1.
Reviewed-on: https://go-review.googlesource.com/19200

From-SVN: r233110
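This commit switches libgo's copy of the sync package from its private race-detector shims (raceenabled, raceAcquire, raceRelease, ...) to the shared internal/race package, deleting sync/race.go and sync/race0.go below. The sketch that follows is not copied from the Go tree; it only lists the internal/race entry points this diff actually calls, written as the no-op variant that presumably mirrors the deleted sync/race0.go (the race-enabled build would forward these to the runtime hooks instead).

// Sketch only: the internal/race surface implied by the calls in this diff,
// shown as the non-race (no-op) variant. Names and shapes are inferred, not
// quoted from the upstream source.
package race

import "unsafe"

// Enabled reports whether the binary was built with the race detector.
const Enabled = false

func Acquire(addr unsafe.Pointer)      {} // happens-before edge: acquire side
func Release(addr unsafe.Pointer)      {} // happens-before edge: release side
func ReleaseMerge(addr unsafe.Pointer) {} // release that merges with prior releases
func Disable()                         {} // suppress race reporting in the current goroutine
func Enable()                          {} // re-enable race reporting
func Read(addr unsafe.Pointer)         {} // model a read of addr
func Write(addr unsafe.Pointer)        {} // model a write of addr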
Diffstat (limited to 'libgo/go/sync')
-rw-r--r--  libgo/go/sync/cond.go             21
-rw-r--r--  libgo/go/sync/export_test.go       2
-rw-r--r--  libgo/go/sync/mutex.go            13
-rw-r--r--  libgo/go/sync/pool.go              5
-rw-r--r--  libgo/go/sync/race.go             42
-rw-r--r--  libgo/go/sync/race0.go            34
-rw-r--r--  libgo/go/sync/rwmutex.go          49
-rw-r--r--  libgo/go/sync/waitgroup.go        33
-rw-r--r--  libgo/go/sync/waitgroup_test.go    3
9 files changed, 65 insertions(+), 137 deletions(-)
diff --git a/libgo/go/sync/cond.go b/libgo/go/sync/cond.go
index 9e6bc17..0aefcda 100644
--- a/libgo/go/sync/cond.go
+++ b/libgo/go/sync/cond.go
@@ -5,6 +5,7 @@
package sync
import (
+ "internal/race"
"sync/atomic"
"unsafe"
)
@@ -51,12 +52,12 @@ func NewCond(l Locker) *Cond {
//
func (c *Cond) Wait() {
c.checker.check()
- if raceenabled {
- raceDisable()
+ if race.Enabled {
+ race.Disable()
}
atomic.AddUint32(&c.waiters, 1)
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
c.L.Unlock()
runtime_Syncsemacquire(&c.sema)
@@ -81,14 +82,14 @@ func (c *Cond) Broadcast() {
func (c *Cond) signalImpl(all bool) {
c.checker.check()
- if raceenabled {
- raceDisable()
+ if race.Enabled {
+ race.Disable()
}
for {
old := atomic.LoadUint32(&c.waiters)
if old == 0 {
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
return
}
@@ -97,8 +98,8 @@ func (c *Cond) signalImpl(all bool) {
new = 0
}
if atomic.CompareAndSwapUint32(&c.waiters, old, new) {
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
runtime_Syncsemrelease(&c.sema, old-new)
return
diff --git a/libgo/go/sync/export_test.go b/libgo/go/sync/export_test.go
index 6f49b3b..fa5983a 100644
--- a/libgo/go/sync/export_test.go
+++ b/libgo/go/sync/export_test.go
@@ -7,5 +7,3 @@ package sync
// Export for testing.
var Runtime_Semacquire = runtime_Semacquire
var Runtime_Semrelease = runtime_Semrelease
-
-const RaceEnabled = raceenabled
diff --git a/libgo/go/sync/mutex.go b/libgo/go/sync/mutex.go
index 3f280ad..eb52614 100644
--- a/libgo/go/sync/mutex.go
+++ b/libgo/go/sync/mutex.go
@@ -11,6 +11,7 @@
package sync
import (
+ "internal/race"
"sync/atomic"
"unsafe"
)
@@ -41,8 +42,8 @@ const (
func (m *Mutex) Lock() {
// Fast path: grab unlocked mutex.
if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
- if raceenabled {
- raceAcquire(unsafe.Pointer(m))
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
}
return
}
@@ -85,8 +86,8 @@ func (m *Mutex) Lock() {
}
}
- if raceenabled {
- raceAcquire(unsafe.Pointer(m))
+ if race.Enabled {
+ race.Acquire(unsafe.Pointer(m))
}
}
@@ -97,9 +98,9 @@ func (m *Mutex) Lock() {
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
- if raceenabled {
+ if race.Enabled {
_ = m.state
- raceRelease(unsafe.Pointer(m))
+ race.Release(unsafe.Pointer(m))
}
// Fast path: drop lock bit.
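The Unlock hunk keeps the documented property that a Mutex is not tied to a particular goroutine: one goroutine may Lock it and arrange for another to Unlock it. A hypothetical example of that hand-off (not part of this patch):

// Hypothetical illustration: Lock in one goroutine, Unlock in another.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var mu sync.Mutex
	mu.Lock() // acquired by the main goroutine
	done := make(chan struct{})
	go func() {
		fmt.Println("unlocking from another goroutine")
		mu.Unlock() // released by a different goroutine, which Mutex permits
		close(done)
	}()
	<-done
	mu.Lock() // succeeds once the other goroutine has released it
	mu.Unlock()
}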
diff --git a/libgo/go/sync/pool.go b/libgo/go/sync/pool.go
index 0cf06370..381af0b 100644
--- a/libgo/go/sync/pool.go
+++ b/libgo/go/sync/pool.go
@@ -5,6 +5,7 @@
package sync
import (
+ "internal/race"
"runtime"
"sync/atomic"
"unsafe"
@@ -59,7 +60,7 @@ type poolLocal struct {
// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
- if raceenabled {
+ if race.Enabled {
// Under race detector the Pool degenerates into no-op.
// It's conforming, simple and does not introduce excessive
// happens-before edges between unrelated goroutines.
@@ -91,7 +92,7 @@ func (p *Pool) Put(x interface{}) {
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
- if raceenabled {
+ if race.Enabled {
if p.New != nil {
return p.New()
}
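The comment retained in Put spells out the policy under the race detector: the Pool degenerates into a no-op, so Put drops its argument and Get simply calls New. A hypothetical program (not part of this patch) where that difference is observable:

// Hypothetical illustration: under -race, a pooled value is never handed back.
package main

import (
	"bytes"
	"fmt"
	"sync"
)

func main() {
	p := sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}
	b := new(bytes.Buffer)
	p.Put(b)
	// Always false under the race detector (Put was a no-op, Get calls New);
	// usually true in a normal build, though reuse is never guaranteed.
	fmt.Println(p.Get() == b)
}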
diff --git a/libgo/go/sync/race.go b/libgo/go/sync/race.go
deleted file mode 100644
index fd0277d..0000000
--- a/libgo/go/sync/race.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build race
-
-package sync
-
-import (
- "runtime"
- "unsafe"
-)
-
-const raceenabled = true
-
-func raceAcquire(addr unsafe.Pointer) {
- runtime.RaceAcquire(addr)
-}
-
-func raceRelease(addr unsafe.Pointer) {
- runtime.RaceRelease(addr)
-}
-
-func raceReleaseMerge(addr unsafe.Pointer) {
- runtime.RaceReleaseMerge(addr)
-}
-
-func raceDisable() {
- runtime.RaceDisable()
-}
-
-func raceEnable() {
- runtime.RaceEnable()
-}
-
-func raceRead(addr unsafe.Pointer) {
- runtime.RaceRead(addr)
-}
-
-func raceWrite(addr unsafe.Pointer) {
- runtime.RaceWrite(addr)
-}
diff --git a/libgo/go/sync/race0.go b/libgo/go/sync/race0.go
deleted file mode 100644
index 65ada1c..0000000
--- a/libgo/go/sync/race0.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !race
-
-package sync
-
-import (
- "unsafe"
-)
-
-const raceenabled = false
-
-func raceAcquire(addr unsafe.Pointer) {
-}
-
-func raceRelease(addr unsafe.Pointer) {
-}
-
-func raceReleaseMerge(addr unsafe.Pointer) {
-}
-
-func raceDisable() {
-}
-
-func raceEnable() {
-}
-
-func raceRead(addr unsafe.Pointer) {
-}
-
-func raceWrite(addr unsafe.Pointer) {
-}
diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go
index 0e8a58e..d438c93 100644
--- a/libgo/go/sync/rwmutex.go
+++ b/libgo/go/sync/rwmutex.go
@@ -5,6 +5,7 @@
package sync
import (
+ "internal/race"
"sync/atomic"
"unsafe"
)
@@ -27,17 +28,17 @@ const rwmutexMaxReaders = 1 << 30
// RLock locks rw for reading.
func (rw *RWMutex) RLock() {
- if raceenabled {
+ if race.Enabled {
_ = rw.w.state
- raceDisable()
+ race.Disable()
}
if atomic.AddInt32(&rw.readerCount, 1) < 0 {
// A writer is pending, wait for it.
runtime_Semacquire(&rw.readerSem)
}
- if raceenabled {
- raceEnable()
- raceAcquire(unsafe.Pointer(&rw.readerSem))
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
}
}
@@ -46,14 +47,14 @@ func (rw *RWMutex) RLock() {
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
- if raceenabled {
+ if race.Enabled {
_ = rw.w.state
- raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
- raceDisable()
+ race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
+ race.Disable()
}
if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
- raceEnable()
+ race.Enable()
panic("sync: RUnlock of unlocked RWMutex")
}
// A writer is pending.
@@ -62,8 +63,8 @@ func (rw *RWMutex) RUnlock() {
runtime_Semrelease(&rw.writerSem)
}
}
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
}
@@ -74,9 +75,9 @@ func (rw *RWMutex) RUnlock() {
// a blocked Lock call excludes new readers from acquiring
// the lock.
func (rw *RWMutex) Lock() {
- if raceenabled {
+ if race.Enabled {
_ = rw.w.state
- raceDisable()
+ race.Disable()
}
// First, resolve competition with other writers.
rw.w.Lock()
@@ -86,10 +87,10 @@ func (rw *RWMutex) Lock() {
if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
runtime_Semacquire(&rw.writerSem)
}
- if raceenabled {
- raceEnable()
- raceAcquire(unsafe.Pointer(&rw.readerSem))
- raceAcquire(unsafe.Pointer(&rw.writerSem))
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(&rw.readerSem))
+ race.Acquire(unsafe.Pointer(&rw.writerSem))
}
}
@@ -100,17 +101,17 @@ func (rw *RWMutex) Lock() {
// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
- if raceenabled {
+ if race.Enabled {
_ = rw.w.state
- raceRelease(unsafe.Pointer(&rw.readerSem))
- raceRelease(unsafe.Pointer(&rw.writerSem))
- raceDisable()
+ race.Release(unsafe.Pointer(&rw.readerSem))
+ race.Release(unsafe.Pointer(&rw.writerSem))
+ race.Disable()
}
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
- raceEnable()
+ race.Enable()
panic("sync: Unlock of unlocked RWMutex")
}
// Unblock blocked readers, if any.
@@ -119,8 +120,8 @@ func (rw *RWMutex) Unlock() {
}
// Allow other writers to proceed.
rw.w.Unlock()
- if raceenabled {
- raceEnable()
+ if race.Enabled {
+ race.Enable()
}
}
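The Lock doc comment in the hunk above states the writer-preference rule: once a Lock call is blocked, new readers are excluded until the writer has acquired and released the lock. The semantics are unchanged by this patch; only the instrumentation calls are renamed. A hypothetical usage sketch with that rule noted in comments:

// Hypothetical illustration of ordinary RWMutex use (not part of this patch).
package main

import (
	"fmt"
	"sync"
)

func main() {
	var rw sync.RWMutex
	data := map[string]int{}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			rw.Lock() // exclusive; while this waits, new RLock calls also block
			data[fmt.Sprintf("k%d", n)] = n
			rw.Unlock()
		}(i)
	}
	wg.Wait()

	rw.RLock() // shared; many readers may hold this at once
	fmt.Println(len(data))
	rw.RUnlock()
}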
diff --git a/libgo/go/sync/waitgroup.go b/libgo/go/sync/waitgroup.go
index de399e6..c77fec3 100644
--- a/libgo/go/sync/waitgroup.go
+++ b/libgo/go/sync/waitgroup.go
@@ -5,6 +5,7 @@
package sync
import (
+ "internal/race"
"sync/atomic"
"unsafe"
)
@@ -46,24 +47,24 @@ func (wg *WaitGroup) state() *uint64 {
// See the WaitGroup example.
func (wg *WaitGroup) Add(delta int) {
statep := wg.state()
- if raceenabled {
+ if race.Enabled {
_ = *statep // trigger nil deref early
if delta < 0 {
// Synchronize decrements with Wait.
- raceReleaseMerge(unsafe.Pointer(wg))
+ race.ReleaseMerge(unsafe.Pointer(wg))
}
- raceDisable()
- defer raceEnable()
+ race.Disable()
+ defer race.Enable()
}
state := atomic.AddUint64(statep, uint64(delta)<<32)
v := int32(state >> 32)
w := uint32(state)
- if raceenabled {
+ if race.Enabled {
if delta > 0 && v == int32(delta) {
// The first increment must be synchronized with Wait.
// Need to model this as a read, because there can be
// several concurrent wg.counter transitions from 0.
- raceRead(unsafe.Pointer(&wg.sema))
+ race.Read(unsafe.Pointer(&wg.sema))
}
}
if v < 0 {
@@ -98,9 +99,9 @@ func (wg *WaitGroup) Done() {
// Wait blocks until the WaitGroup counter is zero.
func (wg *WaitGroup) Wait() {
statep := wg.state()
- if raceenabled {
+ if race.Enabled {
_ = *statep // trigger nil deref early
- raceDisable()
+ race.Disable()
}
for {
state := atomic.LoadUint64(statep)
@@ -108,28 +109,28 @@ func (wg *WaitGroup) Wait() {
w := uint32(state)
if v == 0 {
// Counter is 0, no need to wait.
- if raceenabled {
- raceEnable()
- raceAcquire(unsafe.Pointer(wg))
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(wg))
}
return
}
// Increment waiters count.
if atomic.CompareAndSwapUint64(statep, state, state+1) {
- if raceenabled && w == 0 {
+ if race.Enabled && w == 0 {
// Wait must be synchronized with the first Add.
// Need to model this is as a write to race with the read in Add.
// As a consequence, can do the write only for the first waiter,
// otherwise concurrent Waits will race with each other.
- raceWrite(unsafe.Pointer(&wg.sema))
+ race.Write(unsafe.Pointer(&wg.sema))
}
runtime_Semacquire(&wg.sema)
if *statep != 0 {
panic("sync: WaitGroup is reused before previous Wait has returned")
}
- if raceenabled {
- raceEnable()
- raceAcquire(unsafe.Pointer(wg))
+ if race.Enabled {
+ race.Enable()
+ race.Acquire(unsafe.Pointer(wg))
}
return
}
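The comments in Add and Wait above explain the modeling: the first increment of the counter from 0 is treated as a race.Read of &wg.sema, and the first waiter issues a matching race.Write, so the detector sees Add happen before Wait returns. A hypothetical program showing the usage pattern that modeling covers:

// Hypothetical illustration: Add before starting work, Wait for completion.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // the first call transitions the counter from 0
		go func(n int) {
			defer wg.Done()
			fmt.Println("worker", n)
		}(i)
	}
	wg.Wait() // must be ordered after the Adds above
}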
diff --git a/libgo/go/sync/waitgroup_test.go b/libgo/go/sync/waitgroup_test.go
index 3e3e3bf..a581660 100644
--- a/libgo/go/sync/waitgroup_test.go
+++ b/libgo/go/sync/waitgroup_test.go
@@ -5,6 +5,7 @@
package sync_test
import (
+ "internal/race"
"runtime"
. "sync"
"sync/atomic"
@@ -48,7 +49,7 @@ func TestWaitGroup(t *testing.T) {
}
func knownRacy(t *testing.T) {
- if RaceEnabled {
+ if race.Enabled {
t.Skip("skipping known-racy test under the race detector")
}
}