Diffstat (limited to 'libgo/go/sync/mutex.go')
-rw-r--r--  libgo/go/sync/mutex.go  18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/libgo/go/sync/mutex.go b/libgo/go/sync/mutex.go
index 4c5582c..11ad20c 100644
--- a/libgo/go/sync/mutex.go
+++ b/libgo/go/sync/mutex.go
@@ -77,7 +77,11 @@ func (m *Mutex) Lock() {
 		}
 		return
 	}
+	// Slow path (outlined so that the fast path can be inlined)
+	m.lockSlow()
+}
 
+func (m *Mutex) lockSlow() {
 	var waitStartTime int64
 	starving := false
 	awoke := false
@@ -131,7 +135,7 @@ func (m *Mutex) Lock() {
 			if waitStartTime == 0 {
 				waitStartTime = runtime_nanotime()
 			}
-			runtime_SemacquireMutex(&m.sema, queueLifo)
+			runtime_SemacquireMutex(&m.sema, queueLifo, 1)
 			starving = starving || runtime_nanotime()-waitStartTime > starvationThresholdNs
 			old = m.state
 			if old&mutexStarving != 0 {
@@ -180,6 +184,14 @@ func (m *Mutex) Unlock() {
 
 	// Fast path: drop lock bit.
 	new := atomic.AddInt32(&m.state, -mutexLocked)
+	if new != 0 {
+		// Outlined slow path to allow inlining the fast path.
+		// To hide unlockSlow during tracing we skip one extra frame when tracing GoUnblock.
+		m.unlockSlow(new)
+	}
+}
+
+func (m *Mutex) unlockSlow(new int32) {
 	if (new+mutexLocked)&mutexLocked == 0 {
 		throw("sync: unlock of unlocked mutex")
 	}
@@ -198,7 +210,7 @@ func (m *Mutex) Unlock() {
 			// Grab the right to wake someone.
 			new = (old - 1<<mutexWaiterShift) | mutexWoken
 			if atomic.CompareAndSwapInt32(&m.state, old, new) {
-				runtime_Semrelease(&m.sema, false)
+				runtime_Semrelease(&m.sema, false, 1)
 				return
 			}
 			old = m.state
@@ -208,6 +220,6 @@ func (m *Mutex) Unlock() {
 		// Note: mutexLocked is not set, the waiter will set it after wakeup.
 		// But mutex is still considered locked if mutexStarving is set,
 		// so new coming goroutines won't acquire it.
-		runtime_Semrelease(&m.sema, true)
+		runtime_Semrelease(&m.sema, true, 1)
 	}
 }
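
The comments in the patch state the rationale: the Go compiler only inlines functions whose bodies fit a fixed cost budget, so moving the contended loops out of Lock and Unlock into lockSlow and unlockSlow leaves each fast path as one atomic operation plus a call, small enough to inline at every call site. The extra "1" passed to runtime_SemacquireMutex and runtime_Semrelease is a skip-frame count: per the "skip one extra frame when tracing GoUnblock" comment, it keeps the tracer attributing blocking to the caller of Lock/Unlock rather than to the new outlined helpers.

A minimal sketch of the same outlining pattern, assuming nothing beyond the standard library; spinLock and its methods are illustrative stand-ins, not the real sync.Mutex (which parks waiters on a semaphore instead of spinning):

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// spinLock is a toy lock used only to demonstrate the fast-path/slow-path
// split; it is NOT how sync.Mutex is implemented.
type spinLock struct {
	state int32
}

// Lock's body is a single CAS plus a call, so it stays within the
// compiler's inlining budget and can be inlined into callers.
func (l *spinLock) Lock() {
	if atomic.CompareAndSwapInt32(&l.state, 0, 1) {
		return // fast path: lock was free
	}
	// Slow path (outlined so that the fast path can be inlined).
	l.lockSlow()
}

// lockSlow holds the cold, loop-heavy code that would otherwise push
// Lock over the inlining budget.
func (l *spinLock) lockSlow() {
	for !atomic.CompareAndSwapInt32(&l.state, 0, 1) {
		runtime.Gosched() // yield instead of burning the time slice
	}
}

func (l *spinLock) Unlock() {
	atomic.StoreInt32(&l.state, 0)
}

func main() {
	var l spinLock
	done := make(chan struct{})
	counter := 0
	for i := 0; i < 4; i++ {
		go func() {
			for j := 0; j < 1000; j++ {
				l.Lock()
				counter++ // protected by l, so no data race
				l.Unlock()
			}
			done <- struct{}{}
		}()
	}
	for i := 0; i < 4; i++ {
		<-done
	}
	fmt.Println(counter) // always prints 4000
}

Whether a fast path actually inlines can be checked with the compiler's diagnostics, e.g. go build -gcflags=-m, which reports its "can inline" and "inlining call to" decisions.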