author    Allan McRae <allan@archlinux.org>  2014-02-05 21:14:59 +1000
committer Allan McRae <allan@archlinux.org>  2014-02-06 08:46:20 +1000
commit    73d61e4f6c65da714c0f8a3a233725322553ceba (patch)
tree      1bd578a230d6b0d7a7d09c53de0c243008928df3 /elf
parent    27e839f6f068b6109c6bf2f634f4426b48723218 (diff)
Revert "Async-signal safe TLS."
This reverts commit 7f507ee17aee720fa423fa38502bc3caa0dd03d7.

Conflicts:
	ChangeLog
	nptl/tst-tls7.c
	nptl/tst-tls7mod.c
Diffstat (limited to 'elf')
-rw-r--r-- elf/dl-open.c  |   5
-rw-r--r-- elf/dl-reloc.c |  48
-rw-r--r-- elf/dl-tls.c   | 100
3 files changed, 39 insertions(+), 114 deletions(-)
diff --git a/elf/dl-open.c b/elf/dl-open.c
index ea222d0..a9ca6b3 100644
--- a/elf/dl-open.c
+++ b/elf/dl-open.c
@@ -548,10 +548,7 @@ cannot load any more object with static TLS"));
generation of the DSO we are allocating data for. */
_dl_update_slotinfo (imap->l_tls_modid);
#endif
- /* We do this iteration under a signal mask in dl-reloc; why not
- here? Because these symbols are new and dlopen hasn't
- returned yet. So we can't possibly be racing with a TLS
- access to them from another thread. */
+
GL(dl_init_static_tls) (imap);
assert (imap->l_need_tls_init == 0);
}
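
The hunk above removes only a comment; the call ordering is unchanged. The
comment's argument still holds after the revert: the slotinfo list is updated
first, then the module's static TLS is initialized, and no extra
synchronization is needed because dlopen has not returned yet. A minimal
sketch of that ordering, with hypothetical stand-ins (module_record,
update_slotinfo, init_static_tls) for the real link_map and GL(...)
machinery:

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-ins; only the ordering of the two calls
   mirrors dl-open.c.  */
struct module_record { size_t tls_modid; int need_tls_init; };

static void update_slotinfo (size_t modid) { (void) modid; }
static void init_static_tls (struct module_record *m) { m->need_tls_init = 0; }

static void
finish_load (struct module_record *m)
{
  /* Publish the new module in the DTV slotinfo list first, then fill
     in its static TLS block.  dlopen has not returned yet, so no other
     thread can be racing on a TLS access to the new symbols.  */
  update_slotinfo (m->tls_modid);
  init_static_tls (m);
  assert (m->need_tls_init == 0);
}
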
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index 81ee47e..1f66fcc 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -16,10 +16,8 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <atomic.h>
#include <errno.h>
#include <libintl.h>
-#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <ldsodefs.h>
@@ -72,6 +70,8 @@ _dl_try_allocate_static_tls (struct link_map *map)
size_t offset = GL(dl_tls_static_used) + (freebytes - n * map->l_tls_align
- map->l_tls_firstbyte_offset);
+
+ map->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
/* dl_tls_static_used includes the TCB at the beginning. */
size_t offset = (((GL(dl_tls_static_used)
@@ -83,36 +83,7 @@ _dl_try_allocate_static_tls (struct link_map *map)
if (used > GL(dl_tls_static_size))
goto fail;
-#else
-# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
-#endif
- /* We've computed the new value we want, now try to install it. */
- ptrdiff_t val;
- if ((val = map->l_tls_offset) == NO_TLS_OFFSET)
- {
- /* l_tls_offset starts out at NO_TLS_OFFSET, and all attempts to
- change it go from NO_TLS_OFFSET to some other value. We use
- compare_and_exchange to ensure only one attempt succeeds. We
- don't actually need any memory ordering here, but _acq is the
- weakest available. */
- (void) atomic_compare_and_exchange_bool_acq (&map->l_tls_offset,
- offset,
- NO_TLS_OFFSET);
- val = map->l_tls_offset;
- assert (val != NO_TLS_OFFSET);
- }
- if (val != offset)
- {
- /* We'd like to set a static offset for this section, but another
- thread has already used a dynamic TLS block for it. Since we can
- only use static offsets if everyone does (and it's not practical
- to move that thread's dynamic block), we have to fail. */
- goto fail;
- }
- /* We installed the value; now update the globals. */
-#if TLS_TCB_AT_TP
- GL(dl_tls_static_used) = offset;
-#elif TLS_DTV_AT_TP
+ map->l_tls_offset = offset;
map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
GL(dl_tls_static_used) = used;
#else
@@ -143,17 +114,8 @@ void
internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
- /* We wrap this in a signal mask because it has to iterate all threads
- (including this one) and update this map's TLS entry. A signal handler
- accessing TLS would try to do the same update and break. */
- sigset_t old;
- _dl_mask_all_signals (&old);
- int err = -1;
- if (map->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
- err = _dl_try_allocate_static_tls (map);
-
- _dl_unmask_signals (&old);
- if (err != 0)
+ if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ || _dl_try_allocate_static_tls (map))
{
_dl_signal_error (0, map->l_name, NULL, N_("\
cannot allocate memory in static TLS block"));
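
The block removed from _dl_try_allocate_static_tls implemented a lock-free
claim on l_tls_offset: each contender attempts exactly one transition away
from NO_TLS_OFFSET, and losers must either accept the winner's value or fail.
A sketch of that claim pattern using C11 atomics (the real code used glibc's
internal atomic_compare_and_exchange_bool_acq; the names and the sentinel
value here are illustrative):

#include <stdatomic.h>
#include <stddef.h>

#define NO_TLS_OFFSET 0   /* illustrative sentinel */

/* Try to claim a static TLS offset for a module.  Returns 0 on
   success, nonzero if another thread already decided differently.  */
static int
try_claim_offset (_Atomic ptrdiff_t *l_tls_offset, ptrdiff_t offset)
{
  ptrdiff_t expected = NO_TLS_OFFSET;

  /* Only one transition away from NO_TLS_OFFSET can ever succeed;
     losers simply observe the winner's value.  Acquire ordering
     mirrors the _acq variant used in the removed code.  */
  atomic_compare_exchange_strong_explicit (l_tls_offset, &expected,
                                           offset,
                                           memory_order_acquire,
                                           memory_order_acquire);

  /* The field is now decided, by us or by someone else.  */
  return atomic_load_explicit (l_tls_offset,
                               memory_order_relaxed) != offset;
}

After the revert, this path needs no atomics: the plain test against
FORCED_DYNAMIC_TLS_OFFSET suffices because the decision is serialized by
GL(dl_load_lock), as the dl-tls.c hunks below make explicit.
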
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 50ec876..c1802e7 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -17,7 +17,6 @@
<http://www.gnu.org/licenses/>. */
#include <assert.h>
-#include <atomic.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
@@ -534,21 +533,19 @@ rtld_hidden_def (_dl_deallocate_tls)
# endif
-static void
-allocate_and_init (dtv_t *dtv, struct link_map *map)
+static void *
+allocate_and_init (struct link_map *map)
{
void *newp;
newp = __signal_safe_memalign (map->l_tls_align, map->l_tls_blocksize);
if (newp == NULL)
oom ();
- /* Initialize the memory. Since this is our thread's space, we are
- under a signal mask, and no one has touched this section before,
- we can safely just overwrite whatever's there. */
+ /* Initialize the memory. */
memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
'\0', map->l_tls_blocksize - map->l_tls_initimage_size);
- dtv->pointer.val = newp;
+ return newp;
}
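
allocate_and_init's initialization step is the same before and after the
revert: copy the module's TLS init image (.tdata) into a freshly aligned
block and zero the tail (.tbss). A self-contained sketch of that pattern,
with C11 aligned_alloc standing in for glibc's internal allocator
(__signal_safe_memalign before the revert, __libc_memalign after):

#define _GNU_SOURCE   /* for mempcpy */
#include <stdlib.h>
#include <string.h>

/* Build a fresh TLS block: the first init_size bytes come from the
   module's init image (.tdata), the rest is zero-filled (.tbss).  */
static void *
make_tls_block (const void *initimage, size_t init_size,
                size_t blocksize, size_t align)
{
  /* Assumes blocksize is a multiple of align, as aligned_alloc
     requires; glibc's internal allocator has no such restriction.  */
  void *newp = aligned_alloc (align, blocksize);
  if (newp == NULL)
    return NULL;

  /* mempcpy returns the byte just past the copied image, so the
     memset starts exactly where .tbss begins.  */
  memset (mempcpy (newp, initimage, init_size),
          '\0', blocksize - init_size);
  return newp;
}
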
@@ -590,15 +587,7 @@ _dl_update_slotinfo (unsigned long int req_modid)
the entry we need. */
size_t new_gen = listp->slotinfo[idx].gen;
size_t total = 0;
- sigset_t old;
-
- _dl_mask_all_signals (&old);
- /* We use the signal mask as a lock against reentrancy here.
- Check that a signal taken before the lock didn't already
- update us. */
- dtv = THREAD_DTV ();
- if (dtv[0].counter >= listp->slotinfo[idx].gen)
- goto out;
+
/* We have to look through the entire dtv slotinfo list. */
listp = GL(dl_tls_dtv_slotinfo_list);
do
@@ -710,8 +699,6 @@ _dl_update_slotinfo (unsigned long int req_modid)
/* This will be the new maximum generation counter. */
dtv[0].counter = new_gen;
- out:
- _dl_unmask_signals (&old);
}
return the_map;
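
The lines deleted from _dl_update_slotinfo used the signal mask as a
reentrancy lock around the DTV walk, re-checking the generation counter
after masking in case a signal handler had already performed the update.
A sketch of that check-under-mask shape (dtv_head and the helper names are
hypothetical; sigprocmask stands in for glibc's internal
_dl_mask_all_signals, and the mask itself was the lock):

#include <signal.h>
#include <stddef.h>

/* Hypothetical DTV header: slot 0 caches the generation this thread
   has already caught up with.  */
struct dtv_head { size_t counter; };

static void
walk_and_update (struct dtv_head *dtv, size_t new_gen)
{
  /* ... refresh every stale slot, then record the new generation.  */
  dtv->counter = new_gen;
}

static void
update_to_generation (struct dtv_head *dtv, size_t new_gen)
{
  sigset_t all, old;
  sigfillset (&all);

  /* Mask signals so a handler cannot re-enter the walk.  */
  sigprocmask (SIG_SETMASK, &all, &old);

  /* A signal taken before we masked may already have done the work;
     the generation counter makes the update idempotent.  */
  if (dtv->counter < new_gen)
    walk_and_update (dtv, new_gen);

  sigprocmask (SIG_SETMASK, &old, NULL);
}
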
@@ -737,60 +724,39 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
the_map = listp->slotinfo[idx].map;
}
- sigset_t old;
- _dl_mask_all_signals (&old);
-
- /* As with update_slotinfo, we use the sigmask as a check against
- reentrancy. */
- if (dtv[GET_ADDR_MODULE].pointer.val != TLS_DTV_UNALLOCATED)
- goto out;
-
- /* Synchronize against a parallel dlopen() forcing this variable
- into static storage. If that happens, we have to be more careful
- about initializing the area, as that dlopen() will be iterating
- the threads to do so itself. */
- ptrdiff_t offset;
- if ((offset = the_map->l_tls_offset) == NO_TLS_OFFSET)
- {
- /* l_tls_offset starts out at NO_TLS_OFFSET, and all attempts to
- change it go from NO_TLS_OFFSET to some other value. We use
- compare_and_exchange to ensure only one attempt succeeds. We
- don't actually need any memory ordering here, but _acq is the
- weakest available. */
- (void) atomic_compare_and_exchange_bool_acq (&the_map->l_tls_offset,
- FORCED_DYNAMIC_TLS_OFFSET,
- NO_TLS_OFFSET);
- offset = the_map->l_tls_offset;
- assert (offset != NO_TLS_OFFSET);
- }
- if (offset == FORCED_DYNAMIC_TLS_OFFSET)
- {
- allocate_and_init (&dtv[GET_ADDR_MODULE], the_map);
- }
- else
+
+ again:
+ /* Make sure that, if a dlopen running in parallel forces the
+ variable into static storage, we'll wait until the address in the
+ static TLS block is set up, and use that. If we're undecided
+ yet, make sure we make the decision holding the lock as well. */
+ if (__builtin_expect (the_map->l_tls_offset
+ != FORCED_DYNAMIC_TLS_OFFSET, 0))
{
- void **pp = &dtv[GET_ADDR_MODULE].pointer.val;
- while (atomic_forced_read (*pp) == TLS_DTV_UNALLOCATED)
+ __rtld_lock_lock_recursive (GL(dl_load_lock));
+ if (__builtin_expect (the_map->l_tls_offset == NO_TLS_OFFSET, 1))
{
- /* for lack of a better (safe) thing to do, just spin.
- Someone else (not us; it's done under a signal mask) set
- this map to a static TLS offset, and they'll iterate all
- threads to initialize it. They'll eventually write
- to pointer.val, at which point we know they've fully
- completed initialization. */
- atomic_delay ();
+ the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ }
+ else
+ {
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ if (__builtin_expect (the_map->l_tls_offset
+ != FORCED_DYNAMIC_TLS_OFFSET, 1))
+ {
+ void *p = dtv[GET_ADDR_MODULE].pointer.val;
+ if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ goto again;
+
+ return (char *) p + GET_ADDR_OFFSET;
+ }
}
- /* Make sure we've picked up their initialization of the actual
- block; this pairs against the write barrier in
- init_one_static_tls, guaranteeing that we see their write of
- the tls_initimage into the static region. */
- atomic_read_barrier ();
}
-out:
- assert (dtv[GET_ADDR_MODULE].pointer.val != TLS_DTV_UNALLOCATED);
- _dl_unmask_signals (&old);
+ void *p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
+ dtv[GET_ADDR_MODULE].pointer.is_static = false;
- return (char *) dtv[GET_ADDR_MODULE].pointer.val + GET_ADDR_OFFSET;
+ return (char *) p + GET_ADDR_OFFSET;
}
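
The replacement logic restored above makes the static-versus-dynamic decision
under GL(dl_load_lock): if l_tls_offset is still undecided, force it dynamic
while holding the lock; if a parallel dlopen has already assigned a static
offset, retry until that dlopen stores the block's address into this thread's
DTV, then use that address. A condensed sketch with a pthread mutex standing
in for dl_load_lock (the sentinel values and names are illustrative, not
glibc's):

#include <pthread.h>
#include <stddef.h>

#define NO_TLS_OFFSET              0
#define FORCED_DYNAMIC_TLS_OFFSET  -1
#define TLS_DTV_UNALLOCATED        ((void *) -1)

static pthread_mutex_t load_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stub for the allocate_and_init sketch shown earlier.  */
static void *
allocate_block (void)
{
  static char block[64];
  return block;
}

static void *
get_block_addr (ptrdiff_t *l_tls_offset, void **dtv_slot)
{
again:
  if (*l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
    {
      pthread_mutex_lock (&load_lock);
      if (*l_tls_offset == NO_TLS_OFFSET)
        {
          /* Still undecided: force this module dynamic while holding
             the lock, then fall through and allocate our own block.  */
          *l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
          pthread_mutex_unlock (&load_lock);
        }
      else
        {
          /* A parallel dlopen picked a static offset; it will also
             store the block's address into our DTV slot.  Retry until
             that store is visible, then use the static block.  */
          pthread_mutex_unlock (&load_lock);
          if (*l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
            {
              if (*dtv_slot == TLS_DTV_UNALLOCATED)
                goto again;
              return *dtv_slot;
            }
        }
    }

  /* Dynamic case: allocate and publish our own block.  */
  return *dtv_slot = allocate_block ();
}
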