author     Ulrich Drepper <drepper@redhat.com>   2000-07-24 07:43:31 +0000
committer  Ulrich Drepper <drepper@redhat.com>   2000-07-24 07:43:31 +0000
commit     5122880a26a0c71e64ab45839d5f37d7fa8f42fc (patch)
tree       0eb8e6794b43e8ea89e2d306c24011f3d7fe26da
parent     7603ea28d333d47f8e45e33515362ec55d381877 (diff)
Update.

2000-07-24  Ulrich Drepper  <drepper@redhat.com>

        * condvar.c: Handle spurious wakeups.  [PR libc/1749].
        * spinlock.h: If IMPLEMENT_TAS_WITH_CAS is defined use
          __compare_and_swap to define testandset.
-rw-r--r--  linuxthreads/ChangeLog |  6
-rw-r--r--  linuxthreads/condvar.c | 88
2 files changed, 72 insertions(+), 22 deletions(-)
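
The change makes the condition-variable wait paths in linuxthreads tolerate restarts that were not meant for them. On the caller side, POSIX already demands the same discipline: pthread_cond_wait() may return without a matching signal, so applications must wait in a loop around a predicate. Below is a minimal sketch of that canonical pattern; the `ready' flag, `lock', and the function names are illustrative and not part of this patch.

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready;                      /* Predicate protected by `lock'.  */

static void *consumer (void *arg)
{
  (void) arg;
  pthread_mutex_lock (&lock);
  /* Re-check the predicate after every return: the wakeup may be
     spurious or may already have been consumed by another waiter.  */
  while (!ready)
    pthread_cond_wait (&cond, &lock);
  /* ... use the shared state here ...  */
  pthread_mutex_unlock (&lock);
  return NULL;
}

static void produce (void)
{
  pthread_mutex_lock (&lock);
  ready = 1;                           /* Change the predicate first ...  */
  pthread_cond_signal (&cond);         /* ... then wake one waiter.  */
  pthread_mutex_unlock (&lock);
}

int main (void)
{
  pthread_t t;
  pthread_create (&t, NULL, consumer, NULL);
  produce ();
  pthread_join (t, NULL);
  return 0;
}

The patch below applies the matching discipline inside the library itself, where the waiter cannot simply re-check a user-level predicate.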
diff --git a/linuxthreads/ChangeLog b/linuxthreads/ChangeLog
index a151848..c4247b2 100644
--- a/linuxthreads/ChangeLog
+++ b/linuxthreads/ChangeLog
@@ -1,6 +1,10 @@
+2000-07-24 Ulrich Drepper <drepper@redhat.com>
+
+ * condvar.c: Handle spurious wakeups. [PR libc/1749].
+
2000-07-21 Ulrich Drepper <drepper@redhat.com>
- * linuxthreads/spinlock.h: If IMPLEMENT_TAS_WITH_CAS is defined use
+ * spinlock.h: If IMPLEMENT_TAS_WITH_CAS is defined use
__compare_and_swap to define testandset.
* linuxthreads/sysdeps/powerpc/pt-machine.h: Add volatile to asms.
Define IMPLEMENT_TAS_WITH_CAS.
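
The 2000-07-21 entry above refers to deriving testandset from a compare-and-swap primitive on targets that define IMPLEMENT_TAS_WITH_CAS (PowerPC in that change). A rough sketch of the idea follows, using GCC's __sync_bool_compare_and_swap builtin as a stand-in for linuxthreads' __compare_and_swap; the real definition in spinlock.h differs in spelling and operand types.

/* Sketch: derive test-and-set from compare-and-swap.  Atomically set
   *spinlock to 1 and report whether it was already held.  The CAS can
   only succeed if the old value was 0, i.e. the lock was free.  */
static inline int testandset (int *spinlock)
{
  return !__sync_bool_compare_and_swap (spinlock, 0, 1);
}

A spinlock acquire then simply loops, e.g. while (testandset (&lock)) continue;, until the lock is observed free.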
diff --git a/linuxthreads/condvar.c b/linuxthreads/condvar.c
index a6a31ca..ae1cef1 100644
--- a/linuxthreads/condvar.c
+++ b/linuxthreads/condvar.c
@@ -60,6 +60,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
volatile pthread_descr self = thread_self();
pthread_extricate_if extr;
int already_canceled = 0;
+ int spurious_wakeup_count;
/* Check whether the mutex is locked and owned by this thread. */
if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
@@ -72,6 +73,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
extr.pu_extricate_func = cond_extricate_func;
/* Register extrication interface */
+ THREAD_SETMEM(self, p_condvar_avail, 0);
__pthread_set_own_extricate_if(self, &extr);
/* Atomically enqueue thread for waiting, but only if it is not
@@ -96,7 +98,20 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
pthread_mutex_unlock(mutex);
- suspend(self);
+ spurious_wakeup_count = 0;
+ while (1)
+ {
+ suspend(self);
+ if (THREAD_GETMEM(self, p_condvar_avail) == 0
+ && THREAD_GETMEM(self, p_woken_by_cancel) == 0)
+ {
+ /* Count resumes that don't belong to us. */
+ spurious_wakeup_count++;
+ continue;
+ }
+ break;
+ }
+
__pthread_set_own_extricate_if(self, 0);
/* Check for cancellation again, to provide correct cancellation
@@ -109,6 +124,10 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
pthread_exit(PTHREAD_CANCELED);
}
+ /* Put back any resumes we caught that don't belong to us. */
+ while (spurious_wakeup_count--)
+ restart(self);
+
pthread_mutex_lock(mutex);
return 0;
}
@@ -121,6 +140,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
volatile pthread_descr self = thread_self();
int already_canceled = 0;
pthread_extricate_if extr;
+ int spurious_wakeup_count;
/* Check whether the mutex is locked and owned by this thread. */
if (mutex->__m_kind != PTHREAD_MUTEX_TIMED_NP
@@ -133,6 +153,7 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
extr.pu_extricate_func = cond_extricate_func;
/* Register extrication interface */
+ THREAD_SETMEM(self, p_condvar_avail, 0);
__pthread_set_own_extricate_if(self, &extr);
/* Enqueue to wait on the condition and check for cancellation. */
@@ -151,26 +172,39 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
pthread_mutex_unlock(mutex);
- if (!timedsuspend(self, abstime)) {
- int was_on_queue;
-
- /* __pthread_lock will queue back any spurious restarts that
- may happen to it. */
-
- __pthread_lock(&cond->__c_lock, self);
- was_on_queue = remove_from_queue(&cond->__c_waiting, self);
- __pthread_unlock(&cond->__c_lock);
-
- if (was_on_queue) {
- __pthread_set_own_extricate_if(self, 0);
- pthread_mutex_lock(mutex);
- return ETIMEDOUT;
+ spurious_wakeup_count = 0;
+ while (1)
+ {
+ if (!timedsuspend(self, abstime)) {
+ int was_on_queue;
+
+ /* __pthread_lock will queue back any spurious restarts that
+ may happen to it. */
+
+ __pthread_lock(&cond->__c_lock, self);
+ was_on_queue = remove_from_queue(&cond->__c_waiting, self);
+ __pthread_unlock(&cond->__c_lock);
+
+ if (was_on_queue) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_mutex_lock(mutex);
+ return ETIMEDOUT;
+ }
+
+ /* Eat the outstanding restart() from the signaller */
+ suspend(self);
+ }
+
+ if (THREAD_GETMEM(self, p_condvar_avail) == 0
+ && THREAD_GETMEM(self, p_woken_by_cancel) == 0)
+ {
+ /* Count resumes that don't belong to us. */
+ spurious_wakeup_count++;
+ continue;
+ }
+ break;
}
- /* Eat the outstanding restart() from the signaller */
- suspend(self);
- }
-
__pthread_set_own_extricate_if(self, 0);
/* The remaining logic is the same as in other cancellable waits,
@@ -183,6 +217,10 @@ pthread_cond_timedwait_relative(pthread_cond_t *cond,
pthread_exit(PTHREAD_CANCELED);
}
+ /* Put back any resumes we caught that don't belong to us. */
+ while (spurious_wakeup_count--)
+ restart(self);
+
pthread_mutex_lock(mutex);
return 0;
}
@@ -201,7 +239,11 @@ int pthread_cond_signal(pthread_cond_t *cond)
__pthread_lock(&cond->__c_lock, NULL);
th = dequeue(&cond->__c_waiting);
__pthread_unlock(&cond->__c_lock);
- if (th != NULL) restart(th);
+ if (th != NULL) {
+ th->p_condvar_avail = 1;
+ WRITE_MEMORY_BARRIER();
+ restart(th);
+ }
return 0;
}
@@ -215,7 +257,11 @@ int pthread_cond_broadcast(pthread_cond_t *cond)
cond->__c_waiting = NULL;
__pthread_unlock(&cond->__c_lock);
/* Now signal each process in the queue */
- while ((th = dequeue(&tosignal)) != NULL) restart(th);
+ while ((th = dequeue(&tosignal)) != NULL) {
+ th->p_condvar_avail = 1;
+ WRITE_MEMORY_BARRIER();
+ restart(th);
+ }
return 0;
}
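
Inside the library the fix takes a different shape from the caller-side predicate loop: the signaller sets a per-thread flag, p_condvar_avail, before calling restart(), and a waiter resumed without that flag treats the resume as spurious, counts it, keeps waiting, and re-posts the stolen restarts once a genuine wakeup arrives. The following is a much simplified sketch of that counting scheme; a POSIX semaphore stands in for linuxthreads' suspend()/restart(), the hypothetical struct waiter replaces the real thread descriptor, and cancellation handling is omitted.

#include <semaphore.h>

/* Hypothetical stand-in for the linuxthreads thread descriptor:
   `sem' models suspend()/restart(), `condvar_avail' mirrors the
   p_condvar_avail flag set by the signaller.  The semaphore must be
   created with sem_init (&w->sem, 0, 0) before use.  */
struct waiter {
  sem_t sem;
  volatile int condvar_avail;
};

/* Signaller side (compare pthread_cond_signal in the patch):
   mark the wakeup as genuine, then resume the waiter.  */
static void wake (struct waiter *w)
{
  w->condvar_avail = 1;   /* th->p_condvar_avail = 1; WRITE_MEMORY_BARRIER();  */
  sem_post (&w->sem);     /* restart (th);  */
}

/* Waiter side (compare the loop added to pthread_cond_wait):
   swallow resumes that were not meant for us, count them, and put
   them back once a genuine wakeup arrives so their real consumers
   (e.g. a later lock acquisition by this thread) still see them.  */
static void wait_for_wakeup (struct waiter *w)
{
  int spurious_wakeup_count = 0;

  w->condvar_avail = 0;
  for (;;) {
    sem_wait (&w->sem);                /* suspend (self);  */
    if (w->condvar_avail == 0) {
      /* The resume did not come from wake(); count it and wait again.  */
      spurious_wakeup_count++;
      continue;
    }
    break;
  }

  /* Put back any resumes we caught that don't belong to us.  */
  while (spurious_wakeup_count-- > 0)
    sem_post (&w->sem);                /* restart (self);  */
}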