-rw-r--r-- | nptl/pthread_cond_wait.c       |  4 ++--
-rw-r--r-- | nptl/pthread_mutex_timedlock.c |  6 +++---
-rw-r--r-- | nptl/pthread_rwlock_common.c   | 14 +++++++-------
-rw-r--r-- | nptl/sem_waitcommon.c          |  2 +-

4 files changed, 13 insertions, 13 deletions
diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index 685dbca..02d11c6 100644
--- a/nptl/pthread_cond_wait.c
+++ b/nptl/pthread_cond_wait.c
@@ -506,7 +506,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 
 	  __pthread_cleanup_pop (&buffer, 0);
 
-	  if (__glibc_unlikely (err == ETIMEDOUT))
+	  if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
 	    {
 	      __condvar_dec_grefs (cond, g, private);
 	      /* If we timed out, we effectively cancel waiting.  Note that
@@ -515,7 +515,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 		 __condvar_quiesce_and_switch_g1 and us trying to acquire
 		 the lock during cancellation is not possible.  */
 	      __condvar_cancel_waiting (cond, seq, g, private);
-	      result = ETIMEDOUT;
+	      result = err;
 	      goto done;
 	    }
 	  else
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 74adffe..6c72a36 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -270,7 +270,7 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
						 oldval, clockid, abstime,
						 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 	      /* The futex call timed out.  */
-	      if (err == ETIMEDOUT)
+	      if (err == ETIMEDOUT || err == EOVERFLOW)
 		return err;
 	      /* Reload current lock value.  */
 	      oldval = mutex->__data.__lock;
@@ -550,8 +550,8 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
 		  int e = __futex_abstimed_wait64 (
 		      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
 		      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
-		  if (e == ETIMEDOUT)
-		    return ETIMEDOUT;
+		  if (e == ETIMEDOUT || e == EOVERFLOW)
+		    return e;
 		}
 	    }
 	  while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
diff --git a/nptl/pthread_rwlock_common.c b/nptl/pthread_rwlock_common.c
index 4c9f582..9ef432c 100644
--- a/nptl/pthread_rwlock_common.c
+++ b/nptl/pthread_rwlock_common.c
@@ -334,7 +334,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
					     private);
 	  /* We ignore EAGAIN and EINTR.  On time-outs, we can just
 	     return because we don't need to clean up anything.  */
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    return err;
 	}
       /* It makes sense to not break out of the outer loop here
@@ -460,7 +460,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	  int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
					     1 | PTHREAD_RWLOCK_FUTEX_USED,
					     clockid, abstime, private);
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    {
 	      /* If we timed out, we need to unregister.  If no read phase
 		 has been installed while we waited, we can just decrement
@@ -479,7 +479,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	      if (atomic_compare_exchange_weak_relaxed
 		  (&rwlock->__data.__readers,
 		   &r, r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
-		return ETIMEDOUT;
+		return err;
 	      /* TODO Back-off.  */
 	    }
 	  /* Use the acquire MO fence to mirror the steps taken in the
@@ -730,7 +730,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
       int err = __futex_abstimed_wait64 (&rwlock->__data.__writers_futex,
					 1 | PTHREAD_RWLOCK_FUTEX_USED,
					 clockid, abstime, private);
-      if (err == ETIMEDOUT)
+      if (err == ETIMEDOUT || err == EOVERFLOW)
 	{
 	  if (prefer_writer)
 	    {
@@ -758,7 +758,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	    }
 	  /* We cleaned up and cannot have stolen another waiting writer's
 	     futex wake-up, so just return.  */
-	  return ETIMEDOUT;
+	  return err;
 	}
       /* If we got interrupted (EINTR) or the futex word does not have the
 	 expected value (EAGAIN), retry after reloading __readers.  */
@@ -829,7 +829,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 	  int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
					     PTHREAD_RWLOCK_FUTEX_USED,
					     clockid, abstime, private);
-	  if (err == ETIMEDOUT)
+	  if (err == ETIMEDOUT || err == EOVERFLOW)
 	    {
 	      if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP)
 		{
@@ -861,7 +861,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
 
 		  if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
 		    futex_wake (&rwlock->__data.__writers_futex, 1, private);
-		  return ETIMEDOUT;
+		  return err;
 		}
 	      /* TODO Back-off.  */
 	    }
diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
index 6dd4eb9..0ac1f13 100644
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -191,7 +191,7 @@ __new_sem_wait_slow64 (struct new_sem *sem, clockid_t clockid,
 	     documentation.  Before Linux 2.6.22, EINTR was also returned on
 	     spurious wake-ups; we only support more recent Linux versions,
 	     so do not need to consider this here.)  */
-	  if (err == ETIMEDOUT || err == EINTR)
+	  if (err == ETIMEDOUT || err == EINTR || err == EOVERFLOW)
 	    {
 	      __set_errno (err);
 	      err = -1;
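
For context, a minimal caller-side sketch of what this change means in practice: __futex_abstimed_wait64 can report EOVERFLOW when the absolute timeout cannot be represented by the underlying futex interface, and with this patch that error is returned to callers in the same way ETIMEDOUT is, instead of being swallowed inside the wait loops. The program below is not part of the patch; it only illustrates checking for both errors after pthread_mutex_clocklock and assumes a glibc that contains this change. The helper thread and the arbitrary far-future deadline are purely illustrative; whether EOVERFLOW is actually seen depends on the time_t width of the futex path in use.

/* Illustrative only (not part of the patch): treat EOVERFLOW as a distinct
   failure from ETIMEDOUT.  EOVERFLOW means the absolute timeout could not
   be represented by the kernel interface, not that the lock stayed busy.
   Build with -pthread.  */
#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static void *
holder (void *arg)
{
  (void) arg;
  /* Hold the mutex briefly so the main thread reaches the futex wait.  */
  pthread_mutex_lock (&m);
  sleep (2);
  pthread_mutex_unlock (&m);
  return NULL;
}

int
main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, holder, NULL);
  sleep (1);

  /* A deadline far in the future.  On configurations whose futex call
     cannot represent it (e.g. a 32-bit time_t futex path), glibc with
     this change reports EOVERFLOW to the caller.  */
  struct timespec abstime;
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 10000LL * 365 * 24 * 3600;

  int err = pthread_mutex_clocklock (&m, CLOCK_REALTIME, &abstime);
  if (err == 0)
    pthread_mutex_unlock (&m);
  else if (err == ETIMEDOUT || err == EOVERFLOW)
    fprintf (stderr, "lock not acquired: %s\n", strerror (err));

  pthread_join (t, NULL);
  return 0;
}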