author     Florian Weimer <fweimer@redhat.com>    2021-04-21 19:49:51 +0200
committer  Florian Weimer <fweimer@redhat.com>    2021-04-21 19:49:51 +0200
commit     442e8a40da9dfa24aeebf4f1a163f0a58b12cf7e (patch)
tree       4f425bffad25e4232f2ad8b22bc0835a1d5d7a69 /sysdeps/nptl/dl-tls_init_tp.c
parent     90d7e7e5bd3b0683a27c658388b6515ce950c78e (diff)
nptl: Move part of TCB initialization from libpthread to __tls_init_tp
This initialization should only happen once for the main thread's TCB.
At present, auditors can achieve this by not linking against
libpthread. If libpthread becomes part of libc, doing this
initialization in libc would either happen for every audit namespace
or happen too late (if it only happens in the main libc). That is why
moving this code into ld.so, right after the TCB initialization, seems
to be the right thing to do.
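
To make the auditor scenario concrete, the following is a minimal
LD_AUDIT module sketch (illustrative only, not part of this patch; the
file name is hypothetical). It does not link against libpthread, and
with this change the main thread's early TCB setup is performed exactly
once by ld.so, regardless of how many audit namespaces exist or whether
an auditor pulls in libpthread.

/* tcb-audit.c (hypothetical): minimal audit module, built without
   libpthread.  Build and run, for example:
     gcc -shared -fPIC -o tcb-audit.so tcb-audit.c
     LD_AUDIT=./tcb-audit.so /bin/true  */
#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>
#include <unistd.h>

unsigned int
la_version (unsigned int version)
{
  /* Each audit module is loaded into its own link namespace; the early
     TCB initialization moved by this commit is done once in ld.so
     rather than repeated per namespace by libc.  */
  fprintf (stderr, "audit module loaded, pid %ld\n", (long) getpid ());
  return LAV_CURRENT;
}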
For !__ASSUME_SET_ROBUST_LIST ports, this also moves the symbol
__set_robust_list_avail into ld.so, renaming it to
__nptl_set_robust_list_avail and turning it into a proper boolean flag.
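
The availability probe can be demonstrated outside glibc with a small
standalone program (illustrative only; glibc performs the equivalent
check with INTERNAL_SYSCALL_CALL inside __tls_init_tp): on kernels
without the set_robust_list system call the registration fails with
ENOSYS and the flag stays false.

/* probe-robust-list.c (illustrative): mirror the startup probe that
   now sets __nptl_set_robust_list_avail.  */
#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>      /* struct robust_list_head */
#include <stdbool.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static bool set_robust_list_avail;  /* stands in for __nptl_set_robust_list_avail */

int
main (void)
{
  static struct robust_list_head head;
  head.list.next = &head.list;      /* empty, circular robust list */
  head.futex_offset = 0;
  head.list_op_pending = NULL;

  /* Register the empty list for this thread; success means the kernel
     supports robust futex lists.  */
  if (syscall (SYS_set_robust_list, &head, sizeof head) == 0)
    set_robust_list_avail = true;

  if (set_robust_list_avail)
    puts ("set_robust_list: available");
  else
    printf ("set_robust_list: unavailable (errno %d)\n", errno);
  return 0;
}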
Inline the __pthread_initialize_pids function because it no longer
seems useful as a separate function.
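
The inlined code relies on the fact that the set_tid_address system
call returns the caller's thread ID, so a single call both registers
the clear_child_tid location and produces the value stored in the TCB's
tid field. A standalone sketch of that behavior (illustrative, not
glibc code):

/* tid-demo.c (illustrative): set_tid_address returns the caller's TID.  */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t tid_slot;   /* stands in for pd->tid in the TCB */

int
main (void)
{
  /* Register &tid_slot as the clear_child_tid address and capture the
     returned TID, the same pattern as
     pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid)
     in the patch below.  */
  tid_slot = syscall (SYS_set_tid_address, &tid_slot);
  printf ("set_tid_address returned %ld, SYS_gettid says %ld\n",
          (long) tid_slot, (long) syscall (SYS_gettid));
  return 0;
}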
Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Diffstat (limited to 'sysdeps/nptl/dl-tls_init_tp.c')
-rw-r--r--   sysdeps/nptl/dl-tls_init_tp.c   37
1 file changed, 37 insertions, 0 deletions
diff --git a/sysdeps/nptl/dl-tls_init_tp.c b/sysdeps/nptl/dl-tls_init_tp.c
index 8983808..c5172b7 100644
--- a/sysdeps/nptl/dl-tls_init_tp.c
+++ b/sysdeps/nptl/dl-tls_init_tp.c
@@ -16,10 +16,17 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
+#include <kernel-features.h>
 #include <ldsodefs.h>
 #include <list.h>
+#include <nptl/pthreadP.h>
 #include <tls.h>
 
+#ifndef __ASSUME_SET_ROBUST_LIST
+bool __nptl_set_robust_list_avail;
+rtld_hidden_data_def (__nptl_set_robust_list_avail)
+#endif
+
 void
 __tls_init_tp (void)
 {
@@ -27,4 +34,34 @@ __tls_init_tp (void)
   INIT_LIST_HEAD (&GL (dl_stack_used));
   INIT_LIST_HEAD (&GL (dl_stack_user));
   list_add (&THREAD_SELF->list, &GL (dl_stack_user));
+
+  /* Early initialization of the TCB.  */
+  struct pthread *pd = THREAD_SELF;
+  pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid);
+  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
+  THREAD_SETMEM (pd, user_stack, true);
+
+  /* Initialize the robust mutex data.  */
+  {
+#if __PTHREAD_MUTEX_HAVE_PREV
+    pd->robust_prev = &pd->robust_head;
+#endif
+    pd->robust_head.list = &pd->robust_head;
+    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
+                                    - offsetof (pthread_mutex_t,
+                                                __data.__list.__next));
+    int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
+                                     sizeof (struct robust_list_head));
+    if (!INTERNAL_SYSCALL_ERROR_P (res))
+      {
+#ifndef __ASSUME_SET_ROBUST_LIST
+        __nptl_set_robust_list_avail = true;
+#endif
+      }
+  }
+
+  /* Set initial thread's stack block from 0 up to __libc_stack_end.
+     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
+     purposes this is good enough.  */
+  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
 }
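
A note on how the new flag is intended to be consumed on
!__ASSUME_SET_ROBUST_LIST ports: callers that need robust-futex support
can now test the boolean directly. The fragment below is a paraphrased
sketch rather than the exact glibc code; the robust-mutex
initialization paths contain the real check, and the 'is_robust'
condition is a placeholder.

#ifndef __ASSUME_SET_ROBUST_LIST
  /* __tls_init_tp set this flag only if the set_robust_list system
     call succeeded at startup; without it, robust mutexes cannot be
     supported.  */
  if (is_robust && !__nptl_set_robust_list_avail)
    return ENOTSUP;
#endif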