aboutsummaryrefslogtreecommitdiff
path: root/sysdeps
diff options
context:
space:
mode:
authorFlorian Weimer <fweimer@redhat.com>2021-05-10 10:31:41 +0200
committerFlorian Weimer <fweimer@redhat.com>2021-05-10 10:31:41 +0200
commitd6163dfd3831cf48b69f430f37b4c099059a9db5 (patch)
treefb37627d311887c530307ea5325121f6a4732cc2 /sysdeps
parenta64af8c9b6598f6d2685227f64f5ccb9b48c663c (diff)
downloadglibc-d6163dfd3831cf48b69f430f37b4c099059a9db5.zip
glibc-d6163dfd3831cf48b69f430f37b4c099059a9db5.tar.gz
glibc-d6163dfd3831cf48b69f430f37b4c099059a9db5.tar.bz2
elf, nptl: Resolve recursive lock implementation early
If libpthread is included in libc, it is not necessary to delay initialization of the lock/unlock function pointers until libpthread is loaded. This eliminates two unprotected function pointers from _rtld_global and removes some initialization code from libpthread.

Tested-by: Carlos O'Donell <carlos@redhat.com>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Diffstat (limited to 'sysdeps')
-rw-r--r--sysdeps/generic/ldsodefs.h25
-rw-r--r--sysdeps/nptl/dl-mutex.c53
-rw-r--r--sysdeps/nptl/libc-lockP.h17
3 files changed, 81 insertions, 14 deletions
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 1b064c5..6d590d1 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -403,7 +403,7 @@ struct rtld_global
struct auditstate _dl_rtld_auditstate[DL_NNS];
#endif
-#if defined SHARED && defined _LIBC_REENTRANT \
+#if !PTHREAD_IN_LIBC && defined SHARED \
&& defined __rtld_lock_default_lock_recursive
EXTERN void (*_dl_rtld_lock_recursive) (void *);
EXTERN void (*_dl_rtld_unlock_recursive) (void *);
@@ -1318,6 +1318,29 @@ link_map_audit_state (struct link_map *l, size_t index)
}
#endif /* SHARED */
+#if PTHREAD_IN_LIBC && defined SHARED
+/* Recursive locking implementation for use within the dynamic loader.
+ Used to define the __rtld_lock_lock_recursive and
+ __rtld_lock_unlock_recursive via <libc-lock.h>. Initialized to a
+ no-op dummy implementation early. Similar
+ to GL (dl_rtld_lock_recursive) and GL (dl_rtld_unlock_recursive)
+ in !PTHREAD_IN_LIBC builds. */
+extern int (*___rtld_mutex_lock) (pthread_mutex_t *) attribute_hidden;
+extern int (*___rtld_mutex_unlock) (pthread_mutex_t *lock) attribute_hidden;
+
+/* Called after libc has been loaded, but before RELRO is activated.
+ Used to initialize the function pointers to the actual
+ implementations. */
+void __rtld_mutex_init (void) attribute_hidden;
+#else /* !PTHREAD_IN_LIBC */
+static inline void
+__rtld_mutex_init (void)
+{
+ /* The initialization happens later (!PTHREAD_IN_LIBC) or is not
+ needed at all (!SHARED). */
+}
+#endif /* !PTHREAD_IN_LIBC */
+
#if THREAD_GSCOPE_IN_TCB
void __thread_gscope_wait (void) attribute_hidden;
# define THREAD_GSCOPE_WAIT() __thread_gscope_wait ()
diff --git a/sysdeps/nptl/dl-mutex.c b/sysdeps/nptl/dl-mutex.c
new file mode 100644
index 0000000..08b71dc
--- /dev/null
+++ b/sysdeps/nptl/dl-mutex.c
@@ -0,0 +1,53 @@
+/* Recursive locking implementation for the dynamic loader. NPTL version.
+ Copyright (C) 2021 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+/* Use the mutex implementation in libc (assuming PTHREAD_IN_LIBC). */
+
+#include <assert.h>
+#include <first-versions.h>
+#include <ldsodefs.h>
+
+__typeof (pthread_mutex_lock) *___rtld_mutex_lock attribute_relro;
+__typeof (pthread_mutex_unlock) *___rtld_mutex_unlock attribute_relro;
+
+void
+__rtld_mutex_init (void)
+{
+ /* There is an implicit assumption here that the lock counters are
+ zero and this function is called while nothing is locked. For
+ early initialization of the mutex functions this is true because
+ it happens directly in dl_main in elf/rtld.c, and not some ELF
+ constructor while holding loader locks. */
+
+ struct link_map *libc_map = GL (dl_ns)[LM_ID_BASE].libc_map;
+
+ const ElfW(Sym) *sym
+ = _dl_lookup_direct (libc_map, "pthread_mutex_lock",
+ 0x4f152227, /* dl_new_hash output. */
+ FIRST_VERSION_libc_pthread_mutex_lock_STRING,
+ FIRST_VERSION_libc_pthread_mutex_lock_HASH);
+ assert (sym != NULL);
+ ___rtld_mutex_lock = DL_SYMBOL_ADDRESS (libc_map, sym);
+
+ sym = _dl_lookup_direct (libc_map, "pthread_mutex_unlock",
+ 0x7dd7aaaa, /* dl_new_hash output. */
+ FIRST_VERSION_libc_pthread_mutex_unlock_STRING,
+ FIRST_VERSION_libc_pthread_mutex_unlock_HASH);
+ assert (sym != NULL);
+ ___rtld_mutex_unlock = DL_SYMBOL_ADDRESS (libc_map, sym);
+}
diff --git a/sysdeps/nptl/libc-lockP.h b/sysdeps/nptl/libc-lockP.h
index ae9691d..ec7b02b 100644
--- a/sysdeps/nptl/libc-lockP.h
+++ b/sysdeps/nptl/libc-lockP.h
@@ -151,9 +151,6 @@ _Static_assert (LLL_LOCK_INITIALIZER == 0, "LLL_LOCK_INITIALIZER != 0");
__libc_maybe_call (__pthread_mutex_trylock, (&(NAME)), 0)
#endif
-#define __rtld_lock_trylock_recursive(NAME) \
- __libc_maybe_call (__pthread_mutex_trylock, (&(NAME).mutex), 0)
-
/* Unlock the named lock variable. */
#if IS_IN (libc) || IS_IN (libpthread)
# define __libc_lock_unlock(NAME) \
@@ -163,19 +160,13 @@ _Static_assert (LLL_LOCK_INITIALIZER == 0, "LLL_LOCK_INITIALIZER != 0");
#endif
#define __libc_rwlock_unlock(NAME) __pthread_rwlock_unlock (&(NAME))
-#ifdef SHARED
-# define __rtld_lock_default_lock_recursive(lock) \
- ++((pthread_mutex_t *)(lock))->__data.__count;
-
-# define __rtld_lock_default_unlock_recursive(lock) \
- --((pthread_mutex_t *)(lock))->__data.__count;
-
+#if IS_IN (rtld)
# define __rtld_lock_lock_recursive(NAME) \
- GL(dl_rtld_lock_recursive) (&(NAME).mutex)
+ ___rtld_mutex_lock (&(NAME).mutex)
# define __rtld_lock_unlock_recursive(NAME) \
- GL(dl_rtld_unlock_recursive) (&(NAME).mutex)
-#else
+ ___rtld_mutex_unlock (&(NAME).mutex)
+#else /* Not in the dynamic loader. */
# define __rtld_lock_lock_recursive(NAME) \
__pthread_mutex_lock (&(NAME).mutex)