Diffstat (limited to 'libgo/go/sync/rwmutex.go')
-rw-r--r--   libgo/go/sync/rwmutex.go   49
1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/libgo/go/sync/rwmutex.go b/libgo/go/sync/rwmutex.go
index 0e8a58e..d438c93 100644
--- a/libgo/go/sync/rwmutex.go
+++ b/libgo/go/sync/rwmutex.go
@@ -5,6 +5,7 @@
 package sync
 
 import (
+	"internal/race"
	"sync/atomic"
	"unsafe"
 )
@@ -27,17 +28,17 @@ const rwmutexMaxReaders = 1 << 30
 
 // RLock locks rw for reading.
 func (rw *RWMutex) RLock() {
-	if raceenabled {
+	if race.Enabled {
		_ = rw.w.state
-		raceDisable()
+		race.Disable()
	}
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		runtime_Semacquire(&rw.readerSem)
	}
-	if raceenabled {
-		raceEnable()
-		raceAcquire(unsafe.Pointer(&rw.readerSem))
+	if race.Enabled {
+		race.Enable()
+		race.Acquire(unsafe.Pointer(&rw.readerSem))
	}
 }
@@ -46,14 +47,14 @@ func (rw *RWMutex) RLock() {
 // It is a run-time error if rw is not locked for reading
 // on entry to RUnlock.
 func (rw *RWMutex) RUnlock() {
-	if raceenabled {
+	if race.Enabled {
		_ = rw.w.state
-		raceReleaseMerge(unsafe.Pointer(&rw.writerSem))
-		raceDisable()
+		race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
+		race.Disable()
	}
	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
		if r+1 == 0 || r+1 == -rwmutexMaxReaders {
-			raceEnable()
+			race.Enable()
			panic("sync: RUnlock of unlocked RWMutex")
		}
		// A writer is pending.
@@ -62,8 +63,8 @@ func (rw *RWMutex) RUnlock() {
			runtime_Semrelease(&rw.writerSem)
		}
	}
-	if raceenabled {
-		raceEnable()
+	if race.Enabled {
+		race.Enable()
	}
 }
@@ -74,9 +75,9 @@ func (rw *RWMutex) RUnlock() {
 // a blocked Lock call excludes new readers from acquiring
 // the lock.
 func (rw *RWMutex) Lock() {
-	if raceenabled {
+	if race.Enabled {
		_ = rw.w.state
-		raceDisable()
+		race.Disable()
	}
	// First, resolve competition with other writers.
	rw.w.Lock()
@@ -86,10 +87,10 @@ func (rw *RWMutex) Lock() {
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		runtime_Semacquire(&rw.writerSem)
	}
-	if raceenabled {
-		raceEnable()
-		raceAcquire(unsafe.Pointer(&rw.readerSem))
-		raceAcquire(unsafe.Pointer(&rw.writerSem))
+	if race.Enabled {
+		race.Enable()
+		race.Acquire(unsafe.Pointer(&rw.readerSem))
+		race.Acquire(unsafe.Pointer(&rw.writerSem))
	}
 }
@@ -100,17 +101,17 @@ func (rw *RWMutex) Lock() {
 // goroutine. One goroutine may RLock (Lock) an RWMutex and then
 // arrange for another goroutine to RUnlock (Unlock) it.
 func (rw *RWMutex) Unlock() {
-	if raceenabled {
+	if race.Enabled {
		_ = rw.w.state
-		raceRelease(unsafe.Pointer(&rw.readerSem))
-		raceRelease(unsafe.Pointer(&rw.writerSem))
-		raceDisable()
+		race.Release(unsafe.Pointer(&rw.readerSem))
+		race.Release(unsafe.Pointer(&rw.writerSem))
+		race.Disable()
	}
 
	// Announce to readers there is no active writer.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
	if r >= rwmutexMaxReaders {
-		raceEnable()
+		race.Enable()
		panic("sync: Unlock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any.
@@ -119,8 +120,8 @@ func (rw *RWMutex) Unlock() {
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
-	if raceenabled {
-		raceEnable()
+	if race.Enabled {
+		race.Enable()
	}
 }
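
For context, here is a minimal, self-contained usage sketch (not part of the patch) that exercises the RWMutex code paths changed above. The race.Enabled branches in rwmutex.go only take effect when the program is built with the race detector (for example, go run -race). The counter type and its Inc/Get methods are illustrative names, not code from this repository.

package main

import (
	"fmt"
	"sync"
)

// counter is a hypothetical type protecting a map with the RWMutex
// whose implementation the diff above modifies.
type counter struct {
	mu sync.RWMutex
	m  map[string]int
}

// Inc takes the writer lock (the Lock/Unlock path).
func (c *counter) Inc(key string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key]++
}

// Get takes a reader lock (the RLock/RUnlock path); many readers may hold it concurrently.
func (c *counter) Get(key string) int {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.m[key]
}

func main() {
	c := &counter{m: make(map[string]int)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 100; j++ {
				c.Inc("hits")
				_ = c.Get("hits")
			}
		}()
	}
	wg.Wait()
	fmt.Println(c.Get("hits")) // prints 400: 4 goroutines x 100 increments
}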