diff options
author | Florian Weimer <fweimer@redhat.com> | 2021-05-10 10:31:42 +0200 |
---|---|---|
committer | Florian Weimer <fweimer@redhat.com> | 2021-05-10 10:31:42 +0200 |
commit | 732139dabeda7ecce0d56200bc176251e759ccde (patch) | |
tree | cf54a8fcd1ce035217916297f0477861b7960182 /sysdeps/nptl | |
parent | 8c1c0da3a8112695d5cc82aae22666321965b42f (diff) | |
download | glibc-732139dabeda7ecce0d56200bc176251e759ccde.zip glibc-732139dabeda7ecce0d56200bc176251e759ccde.tar.gz glibc-732139dabeda7ecce0d56200bc176251e759ccde.tar.bz2 |
Linux: Move __reclaim_stacks into the fork implementation in libc
As a result, __libc_pthread_init is no longer needed.
Tested-by: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Diffstat (limited to 'sysdeps/nptl')
-rw-r--r-- | sysdeps/nptl/fork.c | 110 |
1 file changed, 110 insertions(+), 0 deletions(-)
diff --git a/sysdeps/nptl/fork.c b/sysdeps/nptl/fork.c
index f41c40f..062b012 100644
--- a/sysdeps/nptl/fork.c
+++ b/sysdeps/nptl/fork.c
@@ -35,6 +35,7 @@
 #include <nss/nss_database.h>
 #include <unwind-link.h>
 #include <sys/single_threaded.h>
+#include <list.h>
 
 static void
 fresetlockfiles (void)
@@ -46,6 +47,106 @@ fresetlockfiles (void)
     _IO_lock_init (*((_IO_lock_t *) _IO_iter_file(i)->_lock));
 }
 
+/* In case of a fork() call the memory allocation in the child will be
+   the same but only one thread is running.  All stacks except that of
+   the one running thread are not used anymore.  We have to recycle
+   them.  */
+static void
+reclaim_stacks (void)
+{
+  struct pthread *self = (struct pthread *) THREAD_SELF;
+
+  /* No locking necessary.  The caller is the only stack in use.  But
+     we have to be aware that we might have interrupted a list
+     operation.  */
+
+  if (GL (dl_in_flight_stack) != 0)
+    {
+      bool add_p = GL (dl_in_flight_stack) & 1;
+      list_t *elem = (list_t *) (GL (dl_in_flight_stack) & ~(uintptr_t) 1);
+
+      if (add_p)
+	{
+	  /* We always add at the beginning of the list.  So in this case we
+	     only need to check the beginning of these lists to see if the
+	     pointers at the head of the list are inconsistent.  */
+	  list_t *l = NULL;
+
+	  if (GL (dl_stack_used).next->prev != &GL (dl_stack_used))
+	    l = &GL (dl_stack_used);
+	  else if (GL (dl_stack_cache).next->prev != &GL (dl_stack_cache))
+	    l = &GL (dl_stack_cache);
+
+	  if (l != NULL)
+	    {
+	      assert (l->next->prev == elem);
+	      elem->next = l->next;
+	      elem->prev = l;
+	      l->next = elem;
+	    }
+	}
+      else
+	{
+	  /* We can simply always replay the delete operation.  */
+	  elem->next->prev = elem->prev;
+	  elem->prev->next = elem->next;
+	}
+
+      GL (dl_in_flight_stack) = 0;
+    }
+
+  /* Mark all stacks except the still running one as free.  */
+  list_t *runp;
+  list_for_each (runp, &GL (dl_stack_used))
+    {
+      struct pthread *curp = list_entry (runp, struct pthread, list);
+      if (curp != self)
+	{
+	  /* This marks the stack as free.  */
+	  curp->tid = 0;
+
+	  /* Account for the size of the stack.  */
+	  GL (dl_stack_cache_actsize) += curp->stackblock_size;
+
+	  if (curp->specific_used)
+	    {
+	      /* Clear the thread-specific data.  */
+	      memset (curp->specific_1stblock, '\0',
+		      sizeof (curp->specific_1stblock));
+
+	      curp->specific_used = false;
+
+	      for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
+		if (curp->specific[cnt] != NULL)
+		  {
+		    memset (curp->specific[cnt], '\0',
+			    sizeof (curp->specific_1stblock));
+
+		    /* We have allocated the block which we do not
+		       free here so re-set the bit.  */
+		    curp->specific_used = true;
+		  }
+	    }
+	}
+    }
+
+  /* Add the stack of all running threads to the cache.  */
+  list_splice (&GL (dl_stack_used), &GL (dl_stack_cache));
+
+  /* Remove the entry for the current thread to from the cache list
+     and add it to the list of running threads.  Which of the two
+     lists is decided by the user_stack flag.  */
+  list_del (&self->list);
+
+  /* Re-initialize the lists for all the threads.  */
+  INIT_LIST_HEAD (&GL (dl_stack_used));
+  INIT_LIST_HEAD (&GL (dl_stack_user));
+
+  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
+    list_add (&self->list, &GL (dl_stack_user));
+  else
+    list_add (&self->list, &GL (dl_stack_used));
+}
 
 pid_t
 __libc_fork (void)
@@ -112,6 +213,13 @@ __libc_fork (void)
     {
       __libc_unwind_link_after_fork ();
 
+      /* There is one thread running.  */
+      __nptl_nthreads = 1;
+
+      /* Initialize thread library locks.  */
+      GL (dl_stack_cache_lock) = LLL_LOCK_INITIALIZER;
+      __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
+
       /* Release malloc locks.  */
       call_function_static_weak (__malloc_fork_unlock_child);
 
@@ -128,6 +236,8 @@ __libc_fork (void)
       /* Reset the lock the dynamic loader uses to protect its data.  */
       __rtld_lock_initialize (GL(dl_load_lock));
 
+      reclaim_stacks ();
+
       /* Run the handlers registered for the child.  */
       __run_fork_handlers (atfork_run_child, multiple_threads);
     }