path: root/linuxthreads/join.c
author    Ulrich Drepper <drepper@redhat.com>    2000-01-05 02:09:12 +0000
committer Ulrich Drepper <drepper@redhat.com>    2000-01-05 02:09:12 +0000
commit    1d2fc9b3c59d0e83e04139ddf633731264b76ea2 (patch)
tree      c738cf2a40851dc25be2c252ba5dbb7f335b5e14 /linuxthreads/join.c
parent    f19f2b34439145daf300bf12789bbc61c8d4db28 (diff)
Redesigned how cancellation unblocks a thread from internal cancellation points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}). Cancellation won't eat a signal in any of these functions (*required* by POSIX and Single Unix Spec!).
2000-01-03  Kaz Kylheku  <kaz@ashi.footprints.net>

Redesigned how cancellation unblocks a thread from internal cancellation
points (sem_wait, pthread_join, pthread_cond_{wait,timedwait}).
Cancellation won't eat a signal in any of these functions (*required* by
POSIX and Single Unix Spec!).

* condvar.c: spontaneous wakeup on pthread_cond_timedwait won't eat a
simultaneous condition variable signal (not required by POSIX or Single
Unix Spec, but nice).
* spinlock.c: __pthread_lock queues back any received restarts that don't
belong to it instead of assuming ownership of the lock upon any restart;
fastlock can no longer be acquired by two threads simultaneously.
* restart.h: restarts queue even on kernels that don't have queued real
time signals (2.0, early 2.1), thanks to an atomic counter, avoiding a
rare race condition in pthread_cond_timedwait.
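The extricate interface itself is internal to linuxthreads, but the pattern it implements can be sketched at user level: a waiter puts itself on a queue before blocking, and if it is cancelled while blocked, a handler pulls it back off the queue so nobody later tries to wake a thread that no longer exists. The sketch below is my own illustration, not code from this commit; it stands in for the internal pthread_extricate_if machinery by using the standard pthread_cleanup_push mechanism, and every name in it (struct waiter, extricate, wait_for_wakeup, q_lock, ...) is made up for the example.

/* Illustrative user-level analogue of the extricate idea (assumed names,
 * not the linuxthreads internals).  Build with:  cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>

struct waiter {
    pthread_t      tid;
    int            woken;
    struct waiter *next;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cond = PTHREAD_COND_INITIALIZER;
static struct waiter  *q_head;

/* Counterpart of an extricate function: dequeue ourselves.  POSIX requires
   pthread_cond_wait to reacquire the mutex before cleanup handlers run, so
   it is safe to edit the queue here; we must drop the lock ourselves.  */
static void extricate(void *arg)
{
    struct waiter *self = arg, **p;
    for (p = &q_head; *p != NULL; p = &(*p)->next)
        if (*p == self) { *p = self->next; break; }
    pthread_mutex_unlock(&q_lock);
}

static void *wait_for_wakeup(void *arg)
{
    struct waiter me = { pthread_self(), 0, NULL };

    pthread_mutex_lock(&q_lock);
    me.next = q_head;
    q_head  = &me;
    pthread_cleanup_push(extricate, &me);     /* "register extricate if"     */
    while (!me.woken)
        pthread_cond_wait(&q_cond, &q_lock);  /* cancellation point          */
    pthread_cleanup_pop(1);                   /* dequeue + unlock either way */
    return arg;
}

int main(void)
{
    pthread_t t;
    void *res;

    pthread_create(&t, NULL, wait_for_wakeup, NULL);
    pthread_cancel(t);                        /* cancel while it (likely) waits */
    pthread_join(t, &res);
    printf("waiter %s, queue is %s\n",
           res == PTHREAD_CANCELED ? "cancelled" : "woken normally",
           q_head == NULL ? "clean" : "stale");
    return 0;
}

The cleanup handler plays roughly the role join_extricate_func plays in the diff below: it runs only when the waiter is cancelled while blocked, and because the mutex is held at that point it can safely unlink the waiter from the queue.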
Diffstat (limited to 'linuxthreads/join.c')
-rw-r--r--  linuxthreads/join.c  48
1 file changed, 44 insertions(+), 4 deletions(-)
diff --git a/linuxthreads/join.c b/linuxthreads/join.c
index 71db541..5e6b78a 100644
--- a/linuxthreads/join.c
+++ b/linuxthreads/join.c
@@ -79,12 +79,37 @@ void pthread_exit(void * retval)
_exit(0);
}
+/* Function called by pthread_cancel to remove the thread from
+ waiting on a condition variable queue. */
+
+static int join_extricate_func(void *obj, pthread_descr th)
+{
+ volatile pthread_descr self = thread_self();
+ pthread_handle handle = obj;
+ pthread_descr jo;
+ int did_remove = 0;
+
+ __pthread_lock(&handle->h_lock, self);
+ jo = handle->h_descr;
+ did_remove = jo->p_joining != NULL;
+ jo->p_joining = NULL;
+ __pthread_unlock(&handle->h_lock);
+
+ return did_remove;
+}
+
int pthread_join(pthread_t thread_id, void ** thread_return)
{
volatile pthread_descr self = thread_self();
struct pthread_request request;
pthread_handle handle = thread_handle(thread_id);
pthread_descr th;
+ pthread_extricate_if extr;
+ int already_canceled = 0;
+
+ /* Set up extrication interface */
+ extr.pu_object = handle;
+ extr.pu_extricate_func = join_extricate_func;
__pthread_lock(&handle->h_lock, self);
if (invalid_handle(handle, thread_id)) {
@@ -103,13 +128,28 @@ int pthread_join(pthread_t thread_id, void ** thread_return)
}
/* If not terminated yet, suspend ourselves. */
if (! th->p_terminated) {
- th->p_joining = self;
+ /* Register extrication interface */
+ __pthread_set_own_extricate_if(self, &extr);
+ if (!(THREAD_GETMEM(self, p_canceled)
+ && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
+ th->p_joining = self;
+ else
+ already_canceled = 1;
__pthread_unlock(&handle->h_lock);
- suspend_with_cancellation(self);
+
+ if (already_canceled) {
+ __pthread_set_own_extricate_if(self, 0);
+ pthread_exit(PTHREAD_CANCELED);
+ }
+
+ suspend(self);
+ /* Deregister extrication interface */
+ __pthread_set_own_extricate_if(self, 0);
+
/* This is a cancellation point */
- if (THREAD_GETMEM(self, p_canceled)
+ if (THREAD_GETMEM(self, p_woken_by_cancel)
&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
- th->p_joining = NULL;
+ THREAD_SETMEM(self, p_woken_by_cancel, 0);
pthread_exit(PTHREAD_CANCELED);
}
__pthread_lock(&handle->h_lock, self);
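The user-visible effect of the new pthread_join path above (register the extricate interface, suspend, and exit with PTHREAD_CANCELED when woken by cancellation) is that a thread blocked in pthread_join can itself be cancelled cleanly, leaving the target joinable by another thread. The following stand-alone demo is my own sketch of that behaviour, not part of the commit; the thread and variable names are illustrative.

/* Assumed example: a thread blocked in pthread_join() is a cancellation
 * point, which is what the extricate/suspend logic above provides.
 * Build with:  cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *target(void *arg)
{
    (void)arg;
    for (;;)
        pause();                /* block indefinitely */
}

static void *joiner(void *arg)
{
    pthread_join(*(pthread_t *)arg, NULL);   /* cancellation point */
    return NULL;
}

int main(void)
{
    pthread_t tgt, joi;
    void *res;

    pthread_create(&tgt, NULL, target, NULL);
    pthread_create(&joi, NULL, joiner, &tgt);

    sleep(1);                   /* give the joiner time to block in pthread_join */
    pthread_cancel(joi);        /* cancel the joiner, not the target */
    pthread_join(joi, &res);
    printf("joiner exited with %s\n",
           res == PTHREAD_CANCELED ? "PTHREAD_CANCELED" : "a normal result");

    pthread_cancel(tgt);        /* tear down the target and join it ourselves */
    pthread_join(tgt, NULL);
    return 0;
}

Because join_extricate_func clears p_joining for the cancelled joiner, the later pthread_join from main() in this sketch should not see a stale waiter left behind by the cancelled thread.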