path: root/libmudflap/mf-hooks3.c
author     Richard Henderson <rth@redhat.com>   2005-07-16 19:29:00 -0700
committer  Richard Henderson <rth@gcc.gnu.org>  2005-07-16 19:29:00 -0700
commit     7544a87f135c194395e525c7ca6123f4fd9f51ea
tree       7eb9ea11806846cc5d60256f51b3b0a1033126fe /libmudflap/mf-hooks3.c
parent     f8820d33eba7e777142fbb8cb0772cc9dc0fe7c2
gcc.c (MFWRAP_SPEC): Don't wrap pthread_join or pthread_exit.
	* gcc.c (MFWRAP_SPEC): Don't wrap pthread_join or pthread_exit.
	* acinclude.m4: New file.
	* configure.ac: Invoke LIBMUDFLAP_CHECK_TLS.
	* configure, config.h.in, Makefile.in, testsuite/Makefile.in: Rebuild.
	* mf-hooks1.c (__mf_0fn_malloc): Move body from ...
	(__mf_0fn_calloc): ... here.
	* mf-hooks3.c (struct pthread_info): Remove.
	(__mf_pthread_info, __mf_pthread_info_idx): Remove.
	(LIBMUDFLAPTH_THREADS_MAX): Set to 1021.
	(struct mf_thread_data): New.
	(mf_thread_data, mf_thread_data_lock): New.
	(__mf_allocate_blank_threadinfo): Remove.
	(__mf_find_threadinfo): Rewrite and simplify.  Only use if TLS
	is not available.
	(__mf_state_perthread): Remove.
	(__mf_get_state, __mf_set_state): New.
	(__mf_pthread_cleanup): Use &errno, rather than saved pointer.
	Update mf_thread_data killing procedure.
	(__mf_pthread_spawner): Similarly.
	(__mf_0fn_pthread_create): Only use wrapper if necessary.  Remove
	code to allocate thread stack space.
	(__mf_0fn_pthread_join, pthread_join): Remove.
	(__mf_0fn_pthread_exit, pthread_exit): Remove.
	* mf-impl.h (dyn_pthread_join, dyn_pthread_exit): Remove.
	(__mf_state_1): Rename from __mf_state; use TLS when available.
	(__mf_get_state, __mf_set_state): New.  Update all users.
	* mf-runtime.c (begin_recursion_protect1): New.
	(BEGIN_RECURSION_PROTECT): Use it.
	(__mf_state_1): Rename from __mf_state; use TLS when available.
	(threads_active_p): Remove.
	(__mf_usage): Compute it directly.

From-SVN: r102108
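For orientation: the core of this change is that libmudflap's per-thread state is now reached only through __mf_get_state and __mf_set_state. When the new LIBMUDFLAP_CHECK_TLS configure check finds working thread-local storage, these reduce to direct accesses of the renamed __mf_state_1 variable; otherwise they fall back to the hand-rolled mf_thread_data hash table implemented in the diff below. A minimal sketch of that split, with the HAVE_TLS side assumed from the ChangeLog entries (the actual declarations live in mf-impl.h, which is not shown in this diff):

/* Sketch only -- not the literal mf-impl.h contents.  */
enum __mf_state_enum { active, reentrant };     /* simplified */

#ifdef HAVE_TLS
/* Assumed TLS path: one thread-local variable, read and written directly.  */
extern __thread enum __mf_state_enum __mf_state_1;
#define __mf_get_state()    (__mf_state_1)
#define __mf_set_state(s)   ((void) (__mf_state_1 = (s)))
#else
/* Non-TLS path: out-of-line functions defined in mf-hooks3.c, backed by
   the mf_thread_data open-addressed table shown below.  */
extern enum __mf_state_enum __mf_get_state (void);
extern void __mf_set_state (enum __mf_state_enum);
#endif

Either way, the rest of the library sees only the two accessors, which is what the ChangeLog's "Update all users" entries refer to.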
Diffstat (limited to 'libmudflap/mf-hooks3.c')
-rw-r--r--  libmudflap/mf-hooks3.c | 608
1 files changed, 164 insertions, 444 deletions
diff --git a/libmudflap/mf-hooks3.c b/libmudflap/mf-hooks3.c
index f980c9b..f3006b2 100644
--- a/libmudflap/mf-hooks3.c
+++ b/libmudflap/mf-hooks3.c
@@ -1,5 +1,5 @@
/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
- Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
+ Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
Contributed by Frank Ch. Eigler <fche@redhat.com>
and Graydon Hoare <graydon@redhat.com>
@@ -52,14 +52,10 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/mman.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
-#include <limits.h>
-#include <sched.h>
-#include <fcntl.h>
+#include <stdbool.h>
#include "mf-runtime.h"
#include "mf-impl.h"
@@ -68,276 +64,214 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#error "Do not compile this file with -fmudflap!"
#endif
-
-/* Multithreading support hooks. */
-
-
-
#ifndef LIBMUDFLAPTH
#error "pthreadstuff is to be included only in libmudflapth"
#endif
+/* ??? Why isn't this done once in the header files. */
+DECLARE(void *, malloc, size_t sz);
+DECLARE(void, free, void *ptr);
+DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
+ void * (*start) (void *), void *arg);
-/* Describe a thread (dead or alive). */
-struct pthread_info
-{
- short used_p; /* Is this slot in use? */
- short dead_p; /* Is this thread dead? */
- pthread_t self; /* The thread id. */
-
- /* If libmudflapth allocated the stack, store its adjusted base/size. */
- void *stack;
- size_t stack_size;
- /* The _alloc fields store unadjusted values from the moment of allocation. */
- void *stack_alloc;
- size_t stack_size_alloc;
-
- int *thread_errno;
- enum __mf_state_enum state;
-};
+/* Multithreading support hooks. */
-/* Describe the startup information for a new user thread. */
-struct pthread_start_info
-{
- /* The user's thread entry point and argument. */
- void * (*user_fn)(void *);
- void *user_arg;
-
- /* Set by user thread when this startup struct may be disposed of. */
- struct pthread_info *thread_info;
-};
+#ifndef HAVE_TLS
+/* We don't have TLS. Ordinarily we could use pthread keys, but since we're
+ commandeering malloc/free that presents a few problems. The first is that
+ we'll recurse from __mf_get_state to pthread_setspecific to malloc back to
+ __mf_get_state during thread startup. This can be solved with clever uses
+ of a mutex. The second problem is that thread shutdown is indistinguishable
+ from thread startup, since libpthread is deallocating our state variable.
+ I've no good solution for this.
+ Which leaves us to handle this mess totally by hand. */
+/* Yes, we want this prime. If pthread_t is a pointer, it's almost always
+ page aligned, and if we use a smaller power of 2, this results in "%N"
+ being the worst possible hash -- all threads hash to zero. */
+#define LIBMUDFLAPTH_THREADS_MAX 1021
+struct mf_thread_data
+{
+ pthread_t self;
+ unsigned char used_p;
+ unsigned char state;
+};
-/* To avoid dynamic memory allocation, use static array to store these
- thread description structs. The second (_idx) array is used as a
- simple caching hash table, mapping PTHREAD_HASH(thread) to its
- index in __mf_pthread_info[]. */
+static struct mf_thread_data mf_thread_data[LIBMUDFLAPTH_THREADS_MAX];
+static pthread_mutex_t mf_thread_data_lock = PTHREAD_MUTEX_INITIALIZER;
-#define LIBMUDFLAPTH_THREADS_MAX 1024
-static struct pthread_info __mf_pthread_info[LIBMUDFLAPTH_THREADS_MAX];
-static unsigned __mf_pthread_info_idx[LIBMUDFLAPTH_THREADS_MAX];
-#define PTHREAD_HASH(p) ((unsigned) (p) % LIBMUDFLAPTH_THREADS_MAX)
+/* Try to identify the main thread when filling in mf_thread_data. We
+ should always be called at least once from the main thread before
+ any new threads are spawned. */
+static int main_seen_p;
+#define PTHREAD_HASH(p) ((unsigned long) (p) % LIBMUDFLAPTH_THREADS_MAX)
-/* Find any old empty entry in __mf_pthread_info; mark it used and
- return it. Return NULL if there are no more available slots. */
-struct pthread_info*
-__mf_allocate_blank_threadinfo (unsigned* idx)
+static struct mf_thread_data *
+__mf_find_threadinfo (int alloc)
{
- static unsigned probe = LIBMUDFLAPTH_THREADS_MAX-1;
- unsigned probe_at_start = probe;
- static pthread_mutex_t mutex =
-#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
- PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
-#else
- PTHREAD_MUTEX_INITIALIZER;
+ pthread_t self = pthread_self ();
+ unsigned long hash = PTHREAD_HASH (self);
+ unsigned long rehash;
+
+#ifdef __alpha__
+ /* Alpha has the loosest memory ordering rules of all. We need a memory
+ barrier to flush the reorder buffer before considering a *read* of a
+ shared variable. Since we're not always taking a lock, we have to do
+ this by hand. */
+ __sync_synchronize ();
#endif
- int rc;
- rc = pthread_mutex_lock (& mutex);
- assert (rc == 0);
+ rehash = hash;
+ while (1)
+ {
+ if (mf_thread_data[rehash].used_p && mf_thread_data[rehash].self == self)
+ return &mf_thread_data[rehash];
+
+ rehash += 7;
+ if (rehash >= LIBMUDFLAPTH_THREADS_MAX)
+ rehash -= LIBMUDFLAPTH_THREADS_MAX;
+ if (rehash == hash)
+ break;
+ }
- /* Look for a blank spot starting one past the last one we found. */
- do
+ if (alloc)
{
- probe = (probe + 1) % LIBMUDFLAPTH_THREADS_MAX;
- struct pthread_info* pi = & __mf_pthread_info [probe];
- if (! pi->used_p)
+ pthread_mutex_lock (&mf_thread_data_lock);
+
+ rehash = hash;
+ while (1)
{
- /* memset (pi, 0, sizeof (*pi)); */
- pi->used_p = 1;
- if (idx != NULL) *idx = probe;
- /* VERBOSE_TRACE ("allocated threadinfo slot %u\n", probe); */
- rc = pthread_mutex_unlock (& mutex);
- assert (rc == 0);
- return pi;
+ if (!mf_thread_data[rehash].used_p)
+ {
+ mf_thread_data[rehash].self = self;
+ __sync_synchronize ();
+ mf_thread_data[rehash].used_p = 1;
+
+ pthread_mutex_unlock (&mf_thread_data_lock);
+ return &mf_thread_data[rehash];
+ }
+
+ rehash += 7;
+ if (rehash >= LIBMUDFLAPTH_THREADS_MAX)
+ rehash -= LIBMUDFLAPTH_THREADS_MAX;
+ if (rehash == hash)
+ break;
}
+
+ pthread_mutex_unlock (&mf_thread_data_lock);
}
- while (probe != probe_at_start);
- rc = pthread_mutex_unlock (& mutex);
- assert (rc == 0);
return NULL;
}
-
-/* Find and return the pthread_info struct for the current thread.
- There might already be one in __mf_pthread_info for this thread, in
- which case return it. There may not be one (if this is a main
- thread, an auxiliary -lpthread manager, or an actual user thread
- making an early call into libmudflap. In these cases, create a new
- entry. If not it's not the main thread, put it into reentrant
- initial state.
-
- NB: VERBOSE_TRACE type functions are not generally safe to call
- from this context, since a new thread might just be "booting up",
- making printf unsafe to call.
-*/
-static struct pthread_info*
-__mf_find_threadinfo ()
+enum __mf_state_enum
+__mf_get_state (void)
{
- pthread_t it = pthread_self ();
- unsigned *hash = & __mf_pthread_info_idx [PTHREAD_HASH (it)];
- struct pthread_info *result = NULL;
- static pthread_t last;
- static int main_thread_seen_p;
-
- /* Check out the lookup cache; failing that, do a linear search
- around the table. */
- {
- struct pthread_info* pi = & __mf_pthread_info [*hash];
- unsigned i;
-
- if (pi->used_p && pi->self == it)
- result = pi;
- else for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
- {
- struct pthread_info* pi2 = & __mf_pthread_info [i];
- if (pi2->used_p && pi2->self == it)
- {
- *hash = i;
- result = pi2;
- break;
- }
- }
- }
-
- if (result == NULL)
- {
- /* Create a __mf_pthread_info record for the main thread. It's
- different from the auto-recognized worker bees because for
- example we can assume that it's a fully stack/errno-equipped
- thread. */
-
- /* This must be the main thread, until now unseen in libmudflap. */
- unsigned *hash = & __mf_pthread_info_idx [PTHREAD_HASH (it)];
- struct pthread_info* pi = __mf_allocate_blank_threadinfo (hash);
- assert (pi != NULL);
- assert (pi->used_p);
- result = pi;
- result->self = it;
-
- if (! main_thread_seen_p)
- {
- result->state = active;
- /* NB: leave result->thread_errno unset, as main thread's errno
- has already been registered in __mf_init. */
- /* NB: leave stack-related fields unset, to avoid
- deallocation. */
- main_thread_seen_p = 1;
- /* VERBOSE_TRACE ("identified self as main thread\n"); */
- }
- else
- {
- result->state = reentrant;
- /* NB: leave result->thread_errno unset, as worker thread's
- errno is unlikely to be used, and user threads fill them
- in during __mf_pthread_spawn(). */
- /* NB: leave stack-related fields unset, leaving pthread_create
- to fill them in for user threads, leaving them empty for
- other threads. */
- /* VERBOSE_TRACE ("identified self as new aux or user thread\n"); */
- }
- }
-
- if (last != it)
+ struct mf_thread_data *data = __mf_find_threadinfo (0);
+ if (data)
+ return data->state;
+
+ /* The main thread needs to default to active state, so that the global
+ constructors are processed in the active state. Child threads should
+ be considered to be in the reentrant state, so that we don't wind up
+ doing Screwy Things inside the thread library; it'll get reset to
+ active state in __mf_pthread_spawner before user code is invoked.
+
+ The trickiest bit here is that the LinuxThreads pthread_manager thread
+ should *always* be considered to be reentrant, so that none of our
+ hooks actually do anything. Why? Because that thread isn't a real
+ thread from the point of view of the thread library, and so lots of
+ stuff isn't initialized, leading to SEGV very quickly. Even calling
+ pthread_self is a bit suspect, but it happens to work. */
+
+ if (main_seen_p)
+ return reentrant;
+ else
{
- /*
- VERBOSE_TRACE ("found threadinfo for %u, slot %u\n",
- (unsigned) it,
- (unsigned) *hash);
- */
- last = it;
+ main_seen_p = 1;
+ data = __mf_find_threadinfo (1);
+ data->state = active;
+ return active;
}
+}
- assert (result != NULL);
- assert (result->self == it);
-
- return result;
+void
+__mf_set_state (enum __mf_state_enum new_state)
+{
+ struct mf_thread_data *data = __mf_find_threadinfo (1);
+ data->state = new_state;
}
+#endif
+/* The following two functions are used only with __mf_opts.heur_std_data.
+ We're interested in recording the location of the thread-local errno
+ variable.
+ Note that this doesn't handle TLS references in general; we have no
visibility into __tls_get_addr for when that memory is allocated at
+ runtime. Hopefully we get to see the malloc or mmap operation that
+ eventually allocates the backing store. */
-/* Return a pointer to the per-thread __mf_state variable. */
-enum __mf_state_enum *
-__mf_state_perthread ()
+/* Describe the startup information for a new user thread. */
+struct mf_thread_start_info
{
- assert (! __mf_starting_p);
- return & (__mf_find_threadinfo()->state);
-}
+ /* The user's thread entry point and argument. */
+ void * (*user_fn)(void *);
+ void *user_arg;
+};
static void
__mf_pthread_cleanup (void *arg)
{
- struct pthread_info *pi = arg;
-
- /* XXX: This unregistration is not safe on platforms where distinct
- threads share errno (or at least its virtual address). */
- if (pi->thread_errno != NULL)
- __mf_unregister (pi->thread_errno, sizeof (int), __MF_TYPE_GUESS);
-
- /* XXX: Only detached threads should designate themselves as dead
- here. Non-detached threads are marked dead after their
- personalized pthread_join() call. */
- pi->state = reentrant;
- pi->dead_p = 1;
+ if (__mf_opts.heur_std_data)
+ __mf_unregister (&errno, sizeof (errno), __MF_TYPE_GUESS);
- VERBOSE_TRACE ("thread pi %p exiting\n", pi);
+#ifndef HAVE_TLS
+ struct mf_thread_data *data = __mf_find_threadinfo (0);
+ if (data)
+ data->used_p = 0;
+#endif
}
static void *
__mf_pthread_spawner (void *arg)
{
- struct pthread_info *pi = __mf_find_threadinfo ();
void *result = NULL;
- /* Turn off reentrancy indications. */
- assert (pi->state == reentrant);
- pi->state = active;
-
- VERBOSE_TRACE ("new user thread\n");
+#ifndef HAVE_TLS
+ __mf_set_state (active);
+#endif
+ /* NB: We could use __MF_TYPE_STATIC here, but we guess that the thread
+ errno is coming out of some dynamically allocated pool that we already
+ know of as __MF_TYPE_HEAP. */
if (__mf_opts.heur_std_data)
- {
- pi->thread_errno = & errno;
- __mf_register (pi->thread_errno, sizeof (int),
- __MF_TYPE_GUESS, "errno area (thread)");
- /* NB: we could use __MF_TYPE_STATIC above, but we guess that
- the thread errno is coming out of some dynamically allocated
- pool that we already know of as __MF_TYPE_HEAP. */
- }
+ __mf_register (&errno, sizeof (errno), __MF_TYPE_GUESS,
+ "errno area (thread)");
/* We considered using pthread_key_t objects instead of these
cleanup stacks, but they were less cooperative with the
interposed malloc hooks in libmudflap. */
- pthread_cleanup_push (& __mf_pthread_cleanup, pi);
-
- /* Call user thread */
- {
- /* Extract given entry point and argument. */
- struct pthread_start_info *psi = arg;
- void * (*user_fn)(void *) = psi->user_fn;
- void *user_arg = psi->user_arg;
+ /* ??? The pthread_key_t problem is solved above... */
+ pthread_cleanup_push (__mf_pthread_cleanup, NULL);
- /* Signal the main thread to resume. */
- psi->thread_info = pi;
+ /* Extract given entry point and argument. */
+ struct mf_thread_start_info *psi = arg;
+ void * (*user_fn)(void *) = psi->user_fn;
+ void *user_arg = psi->user_arg;
+ CALL_REAL (free, arg);
- result = (*user_fn)(user_arg);
- }
+ result = (*user_fn)(user_arg);
pthread_cleanup_pop (1 /* execute */);
- /* NB: there is a slight race here. The pthread_info field will now
- say this thread is dead, but it may still be running .. right
- here. We try to check for this possibility using the
- pthread_kill test below. */
-
return result;
}
@@ -357,245 +291,31 @@ __mf_0fn_pthread_create (pthread_t *thr, const pthread_attr_t *attr,
WRAPPER(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
void * (*start) (void *), void *arg)
{
- DECLARE(int, munmap, void *p, size_t l);
- DECLARE(void *, mmap, void *p, size_t l, int prot, int flags, int fd, off_t of);
- DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
- void * (*start) (void *), void *arg);
- int result;
- pthread_attr_t override_attr;
- void *override_stack;
- size_t override_stacksize;
- void *override_stack_alloc = (void *) 0;
- size_t override_stacksize_alloc = 0;
- unsigned i;
+ int result, need_wrapper = 0;
TRACE ("pthread_create\n");
- /* Garbage-collect dead threads' stacks. */
- LOCKTH ();
- for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
+#ifndef HAVE_TLS
+ need_wrapper = 1;
+#endif
+ need_wrapper |= __mf_opts.heur_std_data != 0;
+
+ if (need_wrapper)
{
- struct pthread_info *pi = & __mf_pthread_info [i];
- if (! pi->used_p)
- continue;
- if (! pi->dead_p)
- continue;
-
- /* VERBOSE_TRACE ("thread %u pi %p stack cleanup deferred (%u)\n",
- (unsigned) pi->self, pi, pi->dead_p); */
-
- /* Delay actual deallocation by a few cycles, try to discourage the
- race mentioned at the end of __mf_pthread_spawner(). */
- if (pi->dead_p)
- pi->dead_p ++;
- if (pi->dead_p >= 10 /* XXX */)
- {
- if (pi->stack)
- CALL_REAL (munmap, pi->stack_alloc, pi->stack_size_alloc);
+ struct mf_thread_start_info *si = CALL_REAL (malloc, sizeof (*si));
- VERBOSE_TRACE ("slot %u freed, stack %p\n", i, pi->stack_alloc);
- memset (pi, 0, sizeof (*pi));
+ /* Fill in startup-control fields. */
+ si->user_fn = start;
+ si->user_arg = arg;
- /* One round of garbage collection is enough. */
- break;
- }
+ /* Actually create the thread. */
+ result = CALL_REAL (pthread_create, thr, attr, __mf_pthread_spawner, si);
}
- UNLOCKTH ();
-
- /* Let's allocate a stack for this thread, if one is not already
- supplied by the caller. We don't want to let e.g. the
- linuxthreads manager thread do this allocation. */
- if (attr != NULL)
- override_attr = *attr;
else
- pthread_attr_init (& override_attr);
-
- /* Get supplied attributes, if any. */
- /* XXX: consider using POSIX2K attr_getstack() */
- if (pthread_attr_getstackaddr (& override_attr, & override_stack) != 0 ||
- pthread_attr_getstacksize (& override_attr, & override_stacksize) != 0)
{
- override_stack = NULL;
- override_stacksize = 0;
+ /* If we're not handling heur_std_data, nothing special to do. */
+ result = CALL_REAL (pthread_create, thr, attr, start, arg);
}
- /* Do we need to allocate the new thread's stack? */
- if (__mf_opts.thread_stack && override_stack == NULL)
- {
- uintptr_t alignment = 256; /* power of two */
-
- /* Perturb the initial stack addresses slightly, to encourage
- threads to have nonconflicting entries in the lookup cache
- for their tracked stack objects. */
- static unsigned perturb = 0;
- const unsigned perturb_delta = 32;
- const unsigned perturb_count = 16;
- perturb += perturb_delta;
- if (perturb > perturb_delta*perturb_count) perturb = 0;
-
- /* Use glibc x86 defaults */
-/* Should have been defined in <limits.h> */
-#ifndef PTHREAD_STACK_MIN
-#define PTHREAD_STACK_MIN 65536
-#endif
- override_stacksize = max (PTHREAD_STACK_MIN, __mf_opts.thread_stack * 1024);
-
-
-#if defined(MAP_ANONYMOUS)
-#define MF_MAP_ANON MAP_ANONYMOUS
-#elif defined(MAP_ANON)
-#define MF_MAP_ANON MAP_ANON
-#endif
-
-#ifndef MAP_FAILED
-#define MAP_FAILED ((void *) -1)
-#endif
-
-#ifdef MF_MAP_ANON
- override_stack = CALL_REAL (mmap, NULL, override_stacksize,
- PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MF_MAP_ANON,
- 0, 0);
-#else
- /* Try mapping /dev/zero instead. */
- {
- static int zerofd = -1;
- if (zerofd == -1)
- zerofd = open ("/dev/zero", O_RDWR);
- if (zerofd == -1)
- override_stack = MAP_FAILED;
- else
- override_stack = CALL_REAL (mmap, NULL, override_stacksize,
- PROT_READ|PROT_WRITE,
- MAP_PRIVATE, zerofd, 0);
- }
-#endif
-
- if (override_stack == 0 || override_stack == MAP_FAILED)
- {
- errno = EAGAIN;
- return -1;
- }
-
- VERBOSE_TRACE ("thread stack alloc %p size %lu\n",
- override_stack, (unsigned long) override_stacksize);
-
- /* Save the original allocated values for later deallocation. */
- override_stack_alloc = override_stack;
- override_stacksize_alloc = override_stacksize;
-
- /* The stackaddr pthreads attribute is a candidate stack pointer.
- It must point near the top or the bottom of this buffer, depending
- on whether stack grows downward or upward, and suitably aligned.
- On the x86, it grows down, so we set stackaddr near the top. */
- /* XXX: port logic */
- override_stack = (void *)
- (((uintptr_t) override_stack + override_stacksize - alignment - perturb)
- & (~(uintptr_t)(alignment-1)));
-
- /* XXX: consider using POSIX2K attr_setstack() */
- if (pthread_attr_setstackaddr (& override_attr, override_stack) != 0 ||
- pthread_attr_setstacksize (& override_attr,
- override_stacksize - alignment - perturb) != 0)
- {
- /* This should not happen. */
- CALL_REAL (munmap, override_stack, override_stacksize);
- errno = EAGAIN;
- return -1;
- }
- }
-
- /* Actually start the child thread. */
- {
- struct pthread_start_info psi;
- struct pthread_info *pi = NULL;
-
- /* Fill in startup-control fields. */
- psi.user_fn = start;
- psi.user_arg = arg;
- psi.thread_info = NULL;
-
- /* Actually create the thread. */
- __mf_state = reentrant;
- result = CALL_REAL (pthread_create, thr, & override_attr,
- & __mf_pthread_spawner, (void *) & psi);
- __mf_state = active;
- /* We also hook pthread_join/pthread_exit to get into reentrant
- mode during thread shutdown/cleanup. */
-
- /* Wait until child thread has progressed far enough into its
- __mf_pthread_spawner() call. */
- while (1) /* XXX: timeout? */
- {
- volatile struct pthread_start_info *psip = & psi;
- pi = psip->thread_info;
- if (pi != NULL)
- break;
- sched_yield ();
- }
-
- /* Fill in remaining fields in pthread_info. */
- pi->stack = override_stack;
- pi->stack_size = override_stacksize;
- pi->stack_alloc = override_stack_alloc;
- pi->stack_size_alloc = override_stacksize_alloc;
- /* XXX: this might be too late for future heuristics that attempt
- to use thread stack bounds. We may need to put the new thread
- to sleep. */
- }
-
-
- /* May need to clean up if we created a pthread_attr_t of our own. */
- if (attr == NULL)
- pthread_attr_destroy (& override_attr); /* NB: this shouldn't deallocate stack */
-
return result;
}
-
-
-
-#if PIC
-/* A special bootstrap variant. */
-int
-__mf_0fn_pthread_join (pthread_t thr, void **rc)
-{
- return -1;
-}
-#endif
-
-
-#undef pthread_join
-WRAPPER(int, pthread_join, pthread_t thr, void **rc)
-{
- DECLARE(int, pthread_join, pthread_t thr, void **rc);
- int result;
-
- TRACE ("pthread_join\n");
- __mf_state = reentrant;
- result = CALL_REAL (pthread_join, thr, rc);
- __mf_state = active;
-
- return result;
-}
-
-
-#if PIC
-/* A special bootstrap variant. */
-void
-__mf_0fn_pthread_exit (void *rc)
-{
-}
-#endif
-
-
-#undef pthread_exit
-WRAPPER(void, pthread_exit, void *rc)
-{
- DECLARE(void, pthread_exit, void *rc);
-
- TRACE ("pthread_exit\n");
- /* __mf_state = reentrant; */
- CALL_REAL (pthread_exit, rc);
- /* NOTREACHED */
- exit (0); /* Satisfy noreturn attribute of pthread_exit. */
-}
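One more note on the new table size: the comment above LIBMUDFLAPTH_THREADS_MAX in the added __mf_find_threadinfo code argues that the size must be prime because, when pthread_t is a page-aligned pointer, a power-of-two modulus hashes every thread to bucket zero. A tiny standalone demo of that arithmetic (the 4096-byte page size and the sample addresses are assumptions for illustration, not part of the patch):

#include <stdio.h>

int
main (void)
{
  unsigned long p;

  /* Pretend these are page-aligned pthread_t values.  */
  for (p = 0x800000; p < 0x800000 + 8 * 4096; p += 4096)
    printf ("%#10lx   %% 1024 -> %4lu   %% 1021 -> %4lu\n",
            p, p % 1024, p % 1021);
  return 0;
}

Every address lands in bucket 0 under the old 1024-entry scheme, while the prime 1021 spreads them across distinct buckets, which keeps the probe loop in __mf_find_threadinfo short.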