Diffstat (limited to 'nptl')
-rw-r--r--  nptl/allocatestack.c  | 147
-rw-r--r--  nptl/descr.h          |   3
-rw-r--r--  nptl/nptl-init.c      |  11
-rw-r--r--  nptl/pthreadP.h       |   4
-rw-r--r--  nptl/pthread_create.c |   8
5 files changed, 47 insertions, 126 deletions
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
index 4b45f8c..b7f9eee 100644
--- a/nptl/allocatestack.c
+++ b/nptl/allocatestack.c
@@ -106,26 +106,14 @@
static size_t stack_cache_maxsize = 40 * 1024 * 1024; /* 40 MiB by default. */
static size_t stack_cache_actsize;
-/* Mutex protecting this variable. */
-static int stack_cache_lock = LLL_LOCK_INITIALIZER;
-
/* List of queued stack frames. */
static LIST_HEAD (stack_cache);
-/* List of the stacks in use. */
-static LIST_HEAD (stack_used);
-
/* We need to record what list operations we are going to do so that,
in case of an asynchronous interruption due to a fork() call, we
can correct for the work. */
static uintptr_t in_flight_stack;
-/* List of the threads with user provided stacks in use. No need to
- initialize this, since it's done in __pthread_initialize_minimal. */
-list_t __stack_user __attribute__ ((nocommon));
-hidden_data_def (__stack_user)
-
-
/* Check whether the stack is still used or not. */
#define FREE_P(descr) ((descr)->tid <= 0)
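The in_flight_stack bookkeeping mentioned in the comment above is what lets __reclaim_stacks repair the lists when fork() interrupts a list update. As a rough sketch of the pattern (modeled on the usual nptl helpers; the exact barriers and pointer tagging are assumptions, not part of this diff):

static void
stack_list_del (list_t *elem)
{
  /* Record the pending operation so a fork() in the middle of the
     unlink can be detected and corrected afterwards.  */
  in_flight_stack = (uintptr_t) elem;
  atomic_write_barrier ();
  list_del (elem);
  atomic_write_barrier ();
  in_flight_stack = 0;
}

static void
stack_list_add (list_t *elem, list_t *list)
{
  /* Bit 0 tags the in-flight operation as an add rather than a del.  */
  in_flight_stack = (uintptr_t) elem | 1;
  atomic_write_barrier ();
  list_add (elem, list);
  atomic_write_barrier ();
  in_flight_stack = 0;
}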
@@ -173,7 +161,7 @@ get_cached_stack (size_t *sizep, void **memp)
struct pthread *result = NULL;
list_t *entry;
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Search the cache for a matching entry. We search for the
smallest stack which has at least the required size. Note that
@@ -206,7 +194,7 @@ get_cached_stack (size_t *sizep, void **memp)
|| __builtin_expect (result->stackblock_size > 4 * size, 0))
{
/* Release the lock. */
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
return NULL;
}
@@ -218,13 +206,13 @@ get_cached_stack (size_t *sizep, void **memp)
stack_list_del (&result->list);
/* And add to the list of stacks in use. */
- stack_list_add (&result->list, &stack_used);
+ stack_list_add (&result->list, &GL (dl_stack_used));
/* And decrease the cache size. */
stack_cache_actsize -= result->stackblock_size;
/* Release the lock early. */
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Report size and location of the stack to the caller. */
*sizep = result->stackblock_size;
@@ -510,12 +498,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* Prepare to modify global data. */
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* And add to the list of stacks in use. */
- list_add (&pd->list, &__stack_user);
+ list_add (&pd->list, &GL (dl_stack_user));
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
}
else
{
@@ -644,12 +632,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* Prepare to modify global data. */
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* And add to the list of stacks in use. */
- stack_list_add (&pd->list, &stack_used);
+ stack_list_add (&pd->list, &GL (dl_stack_used));
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* There might have been a race. Another thread might have
@@ -690,12 +678,12 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
if (__mprotect (guard, guardsize, PROT_NONE) != 0)
{
mprot_error:
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Remove the thread from the list. */
stack_list_del (&pd->list);
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Get rid of the TLS block we allocated. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);
@@ -799,7 +787,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
void
__deallocate_stack (struct pthread *pd)
{
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Remove the thread from the list of threads with user-defined
stacks. */
@@ -815,7 +803,7 @@ __deallocate_stack (struct pthread *pd)
/* Free the memory associated with the ELF TLS. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
}
@@ -831,10 +819,10 @@ __make_stacks_executable (void **stack_endp)
const size_t pagemask = ~(__getpagesize () - 1);
#endif
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
list_t *runp;
- list_for_each (runp, &stack_used)
+ list_for_each (runp, &GL (dl_stack_used))
{
err = change_stack_perm (list_entry (runp, struct pthread, list)
#ifdef NEED_SEPARATE_REGISTER_STACK
@@ -860,7 +848,7 @@ __make_stacks_executable (void **stack_endp)
break;
}
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
return err;
}
@@ -891,8 +879,8 @@ __reclaim_stacks (void)
pointers at the head of the list are inconsistent. */
list_t *l = NULL;
- if (stack_used.next->prev != &stack_used)
- l = &stack_used;
+ if (GL (dl_stack_used).next->prev != &GL (dl_stack_used))
+ l = &GL (dl_stack_used);
else if (stack_cache.next->prev != &stack_cache)
l = &stack_cache;
@@ -914,7 +902,7 @@ __reclaim_stacks (void)
/* Mark all stacks except the still running one as free. */
list_t *runp;
- list_for_each (runp, &stack_used)
+ list_for_each (runp, &GL (dl_stack_used))
{
struct pthread *curp = list_entry (runp, struct pthread, list);
if (curp != self)
@@ -948,7 +936,7 @@ __reclaim_stacks (void)
}
/* Add the stack of all running threads to the cache. */
- list_splice (&stack_used, &stack_cache);
+ list_splice (&GL (dl_stack_used), &stack_cache);
/* Remove the entry for the current thread from the cache list
and add it to the list of running threads. Which of the two
@@ -956,13 +944,13 @@ __reclaim_stacks (void)
stack_list_del (&self->list);
/* Re-initialize the lists for all the threads. */
- INIT_LIST_HEAD (&stack_used);
- INIT_LIST_HEAD (&__stack_user);
+ INIT_LIST_HEAD (&GL (dl_stack_used));
+ INIT_LIST_HEAD (&GL (dl_stack_user));
if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
- list_add (&self->list, &__stack_user);
+ list_add (&self->list, &GL (dl_stack_user));
else
- list_add (&self->list, &stack_used);
+ list_add (&self->list, &GL (dl_stack_used));
/* There is one thread running. */
__nptl_nthreads = 1;
@@ -970,7 +958,7 @@ __reclaim_stacks (void)
in_flight_stack = 0;
/* Initialize locks. */
- stack_cache_lock = LLL_LOCK_INITIALIZER;
+ GL (dl_stack_cache_lock) = LLL_LOCK_INITIALIZER;
__default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}
@@ -1083,7 +1071,7 @@ __nptl_setxid (struct xid_command *cmdp)
{
int signalled;
int result;
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
__xidcmd = cmdp;
cmdp->cntr = 0;
@@ -1093,7 +1081,7 @@ __nptl_setxid (struct xid_command *cmdp)
/* Iterate over the list with system-allocated threads first. */
list_t *runp;
- list_for_each (runp, &stack_used)
+ list_for_each (runp, &GL (dl_stack_used))
{
struct pthread *t = list_entry (runp, struct pthread, list);
if (t == self)
@@ -1103,7 +1091,7 @@ __nptl_setxid (struct xid_command *cmdp)
}
/* Now the list with threads using user-allocated stacks. */
- list_for_each (runp, &__stack_user)
+ list_for_each (runp, &GL (dl_stack_user))
{
struct pthread *t = list_entry (runp, struct pthread, list);
if (t == self)
@@ -1119,7 +1107,7 @@ __nptl_setxid (struct xid_command *cmdp)
{
signalled = 0;
- list_for_each (runp, &stack_used)
+ list_for_each (runp, &GL (dl_stack_used))
{
struct pthread *t = list_entry (runp, struct pthread, list);
if (t == self)
@@ -1128,7 +1116,7 @@ __nptl_setxid (struct xid_command *cmdp)
signalled += setxid_signal_thread (cmdp, t);
}
- list_for_each (runp, &__stack_user)
+ list_for_each (runp, &GL (dl_stack_user))
{
struct pthread *t = list_entry (runp, struct pthread, list);
if (t == self)
@@ -1149,7 +1137,7 @@ __nptl_setxid (struct xid_command *cmdp)
/* Clean up flags, so that no thread blocks during exit waiting
for a signal which will never come. */
- list_for_each (runp, &stack_used)
+ list_for_each (runp, &GL (dl_stack_used))
{
struct pthread *t = list_entry (runp, struct pthread, list);
if (t == self)
@@ -1158,7 +1146,7 @@ __nptl_setxid (struct xid_command *cmdp)
setxid_unmark_thread (cmdp, t);
}
- list_for_each (runp, &__stack_user)
+ list_for_each (runp, &GL (dl_stack_user))
{
struct pthread *t = list_entry (runp, struct pthread, list);
if (t == self)
@@ -1180,7 +1168,7 @@ __nptl_setxid (struct xid_command *cmdp)
}
__nptl_setxid_error (cmdp, error);
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
return result;
}
@@ -1204,75 +1192,16 @@ void
attribute_hidden
__pthread_init_static_tls (struct link_map *map)
{
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Iterate over the list with system-allocated threads first. */
list_t *runp;
- list_for_each (runp, &stack_used)
+ list_for_each (runp, &GL (dl_stack_used))
init_one_static_tls (list_entry (runp, struct pthread, list), map);
/* Now the list with threads using user-allocated stacks. */
- list_for_each (runp, &__stack_user)
+ list_for_each (runp, &GL (dl_stack_user))
init_one_static_tls (list_entry (runp, struct pthread, list), map);
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
-}
-
-
-void
-attribute_hidden
-__wait_lookup_done (void)
-{
- lll_lock (stack_cache_lock, LLL_PRIVATE);
-
- struct pthread *self = THREAD_SELF;
-
- /* Iterate over the list with system-allocated threads first. */
- list_t *runp;
- list_for_each (runp, &stack_used)
- {
- struct pthread *t = list_entry (runp, struct pthread, list);
- if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
- continue;
-
- int *const gscope_flagp = &t->header.gscope_flag;
-
- /* We have to wait until this thread is done with the global
- scope. First tell the thread that we are waiting and
- possibly have to be woken. */
- if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
- THREAD_GSCOPE_FLAG_WAIT,
- THREAD_GSCOPE_FLAG_USED))
- continue;
-
- do
- futex_wait_simple ((unsigned int *) gscope_flagp,
- THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
- while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
- }
-
- /* Now the list with threads using user-allocated stacks. */
- list_for_each (runp, &__stack_user)
- {
- struct pthread *t = list_entry (runp, struct pthread, list);
- if (t == self || t->header.gscope_flag == THREAD_GSCOPE_FLAG_UNUSED)
- continue;
-
- int *const gscope_flagp = &t->header.gscope_flag;
-
- /* We have to wait until this thread is done with the global
- scope. First tell the thread that we are waiting and
- possibly have to be woken. */
- if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
- THREAD_GSCOPE_FLAG_WAIT,
- THREAD_GSCOPE_FLAG_USED))
- continue;
-
- do
- futex_wait_simple ((unsigned int *) gscope_flagp,
- THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
- while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
- }
-
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
}
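For context on the two near-identical loops deleted above: __wait_lookup_done blocks until every other thread has left the dynamic linker's global scope. Factored into a helper, the per-thread protocol looks roughly like this (lifted from the deleted code; the helper name is ours, not glibc's):

static void
wait_for_gscope_exit (struct pthread *t)
{
  int *const gscope_flagp = &t->header.gscope_flag;

  /* Atomically flip USED -> WAIT so the thread knows a waiter exists.
     If the CAS fails, the flag was not USED (the thread has already
     left the global scope), so there is nothing to wait for.  */
  if (atomic_compare_and_exchange_bool_acq (gscope_flagp,
                                            THREAD_GSCOPE_FLAG_WAIT,
                                            THREAD_GSCOPE_FLAG_USED))
    return;

  /* Sleep until the thread resets the flag and wakes us via futex.  */
  do
    futex_wait_simple ((unsigned int *) gscope_flagp,
                       THREAD_GSCOPE_FLAG_WAIT, FUTEX_PRIVATE);
  while (*gscope_flagp == THREAD_GSCOPE_FLAG_WAIT);
}

The removal here pairs with the hook removal in nptl-init.c below: with the stack lists in _rtld_global, the wait can be performed on the dynamic linker side instead of inside nptl.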
diff --git a/nptl/descr.h b/nptl/descr.h
index d8343ff..b172ee4 100644
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -162,7 +162,8 @@ struct pthread
void *__padding[24];
};
- /* This descriptor's link on the `stack_used' or `__stack_user' list. */
+ /* This descriptor's link on the GL (dl_stack_used) or
+ GL (dl_stack_user) list. */
list_t list;
/* Thread ID - which is also a 'is this thread descriptor (and
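The GL (...) accessors that replace the file-local variables throughout this patch name members of the dynamic linker's _rtld_global structure. A minimal sketch of the convention, assuming the usual <ldsodefs.h> definitions (the exact member placement and preprocessor guards in the real header are not shown by this diff):

/* GL (name) conventionally expands as follows, so GL (dl_stack_used)
   denotes _rtld_global._dl_stack_used in shared builds.  */
#ifdef SHARED
# define GL(name) _rtld_global._##name
#else
# define GL(name) _##name
#endif

struct rtld_global
{
  /* ... */
  /* List of stacks allocated by NPTL and currently in use.  */
  list_t _dl_stack_used;
  /* List of user-provided stacks in use.  */
  list_t _dl_stack_user;
  /* Mutex protecting the stack lists and the stack cache.  */
  int _dl_stack_cache_lock;
  /* ... */
};

Putting the lists into _rtld_global lets ld.so walk them without calling into libpthread.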
diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index 4aa1231..53b8177 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -251,12 +251,9 @@ __pthread_initialize_minimal_internal (void)
purposes this is good enough. */
THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
- /* Initialize the list of all running threads with the main thread. */
- INIT_LIST_HEAD (&__stack_user);
- list_add (&pd->list, &__stack_user);
-
- /* Before initializing __stack_user, the debugger could not find us and
- had to set __nptl_initial_report_events. Propagate its setting. */
+ /* Before initializing GL (dl_stack_user), the debugger could not
+ find us and had to set __nptl_initial_report_events. Propagate
+ its setting. */
THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);
struct sigaction sa;
@@ -336,8 +333,6 @@ __pthread_initialize_minimal_internal (void)
GL(dl_init_static_tls) = &__pthread_init_static_tls;
- GL(dl_wait_lookup_done) = &__wait_lookup_done;
-
/* Register the fork generation counter with the libc. */
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
__libc_multiple_threads_ptr =
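Dropping the GL(dl_wait_lookup_done) assignment removes the function-pointer indirection between the dynamic linker and the gscope barrier. For context, the old wiring looked roughly like this (the macro shape on the rtld side is an assumption based on the usual tls.h convention, not shown in this diff):

/* Old hook wiring, sketched:  */
GL(dl_wait_lookup_done) = &__wait_lookup_done;           /* libpthread init */
#define THREAD_GSCOPE_WAIT() GL(dl_wait_lookup_done) ()  /* rtld call site  */

With the stack lists now in _rtld_global, ld.so can implement the wait itself and the hook becomes unnecessary.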
diff --git a/nptl/pthreadP.h b/nptl/pthreadP.h
index 686f54a..a7510f9 100644
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -208,10 +208,6 @@ extern void __default_pthread_attr_freeres (void) attribute_hidden;
extern size_t __static_tls_size attribute_hidden;
extern size_t __static_tls_align_m1 attribute_hidden;
-/* Thread descriptor handling. */
-extern list_t __stack_user;
-hidden_proto (__stack_user)
-
/* Attribute handling. */
extern struct pthread_attr *__attr_list attribute_hidden;
extern int __attr_list_lock attribute_hidden;
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index 447f005..bad4e57 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -213,9 +213,9 @@ __find_in_stack_list (struct pthread *pd)
list_t *entry;
struct pthread *result = NULL;
- lll_lock (stack_cache_lock, LLL_PRIVATE);
+ lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
- list_for_each (entry, &stack_used)
+ list_for_each (entry, &GL (dl_stack_used))
{
struct pthread *curp;
@@ -228,7 +228,7 @@ __find_in_stack_list (struct pthread *pd)
}
if (result == NULL)
- list_for_each (entry, &__stack_user)
+ list_for_each (entry, &GL (dl_stack_user))
{
struct pthread *curp;
@@ -240,7 +240,7 @@ __find_in_stack_list (struct pthread *pd)
}
}
- lll_unlock (stack_cache_lock, LLL_PRIVATE);
+ lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
return result;
}
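As a usage note: __find_in_stack_list is how nptl validates a thread descriptor that may already have been reaped. A hypothetical caller (name and error code are ours, for illustration only) would look like:

/* Return 0 if TH still names a live descriptor, ESRCH otherwise.  */
static int
validate_thread_handle (pthread_t th)
{
  struct pthread *pd = (struct pthread *) th;
  return __find_in_stack_list (pd) != NULL ? 0 : ESRCH;
}

Because the lookup takes GL (dl_stack_cache_lock) itself, callers get a consistent snapshot of the lists, but no guarantee that the thread remains alive after the call returns.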