aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFlorian Weimer <fweimer@redhat.com>2025-01-16 20:02:42 +0100
committerFlorian Weimer <fweimer@redhat.com>2025-01-16 20:02:42 +0100
commit7f784ffc173b5a2166ff846fd003a2264d614456 (patch)
treeaae075df1710ce59fb449d88646725bbd42026f5
parent0b795abd62282767a219a72c8c6fef753bacedfa (diff)
downloadglibc-7f784ffc173b5a2166ff846fd003a2264d614456.zip
glibc-7f784ffc173b5a2166ff846fd003a2264d614456.tar.gz
glibc-7f784ffc173b5a2166ff846fd003a2264d614456.tar.bz2
elf: Iterate over loaded object list in _dl_determine_tlsoffset
The old code used the slotinfo array as a scratch area to pass the list of TLS-using objects to _dl_determine_tlsoffset. All array entries are subsequently overwritten by _dl_add_to_slotinfo, except the first one. The link maps are usually not at their right position for their module ID in the slotinfo array, so the initial use of the slotinfo array would be incorrect if not for scratch purposes only. In _dl_tls_initial_modid_limit_setup, the old code relied on some link map being written to the first slotinfo entry. After the change, this no longer happens because TLS module ID zero is unused. It's also necessary to move the call after the real initialization of the slotinfo array.
-rw-r--r--elf/dl-tls.c73
-rw-r--r--elf/rtld.c23
2 files changed, 39 insertions, 57 deletions
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 45ea058..647deaf 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -245,14 +245,6 @@ _dl_determine_tlsoffset (void)
size_t freetop = 0;
size_t freebottom = 0;
- /* The first element of the dtv slot info list is allocated. */
- assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
- /* There is at this point only one element in the
- dl_tls_dtv_slotinfo_list list. */
- assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);
-
- struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
-
/* Determining the offset of the various parts of the static TLS
block has several dependencies. In addition we have to work
around bugs in some toolchains.
@@ -285,19 +277,21 @@ _dl_determine_tlsoffset (void)
/* We simply start with zero. */
size_t offset = 0;
- for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
+ for (struct link_map *l = GL(dl_ns)[LM_ID_BASE]._ns_loaded; l != NULL;
+ l = l->l_next)
{
- assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);
+ if (l->l_tls_blocksize == 0)
+ continue;
- size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
- & (slotinfo[cnt].map->l_tls_align - 1));
+ size_t firstbyte = (-l->l_tls_firstbyte_offset
+ & (l->l_tls_align - 1));
size_t off;
- max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
+ max_align = MAX (max_align, l->l_tls_align);
- if (freebottom - freetop >= slotinfo[cnt].map->l_tls_blocksize)
+ if (freebottom - freetop >= l->l_tls_blocksize)
{
- off = roundup (freetop + slotinfo[cnt].map->l_tls_blocksize
- - firstbyte, slotinfo[cnt].map->l_tls_align)
+ off = roundup (freetop + l->l_tls_blocksize
+ - firstbyte, l->l_tls_align)
+ firstbyte;
if (off <= freebottom)
{
@@ -305,24 +299,24 @@ _dl_determine_tlsoffset (void)
/* XXX For some architectures we perhaps should store the
negative offset. */
- slotinfo[cnt].map->l_tls_offset = off;
+ l->l_tls_offset = off;
continue;
}
}
- off = roundup (offset + slotinfo[cnt].map->l_tls_blocksize - firstbyte,
- slotinfo[cnt].map->l_tls_align) + firstbyte;
- if (off > offset + slotinfo[cnt].map->l_tls_blocksize
+ off = roundup (offset + l->l_tls_blocksize - firstbyte,
+ l->l_tls_align) + firstbyte;
+ if (off > offset + l->l_tls_blocksize
+ (freebottom - freetop))
{
freetop = offset;
- freebottom = off - slotinfo[cnt].map->l_tls_blocksize;
+ freebottom = off - l->l_tls_blocksize;
}
offset = off;
/* XXX For some architectures we perhaps should store the
negative offset. */
- slotinfo[cnt].map->l_tls_offset = off;
+ l->l_tls_offset = off;
}
/* Insert the extra TLS block after the last TLS block. */
@@ -366,41 +360,43 @@ _dl_determine_tlsoffset (void)
/* The TLS blocks start right after the TCB. */
size_t offset = TLS_TCB_SIZE;
- for (size_t cnt = 0; slotinfo[cnt].map != NULL; ++cnt)
+ for (struct link_map *l = GL(dl_ns)[LM_ID_BASE]._ns_loaded; l != NULL;
+ l = l->l_next)
{
- assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);
+ if (l->l_tls_blocksize == 0)
+ continue;
- size_t firstbyte = (-slotinfo[cnt].map->l_tls_firstbyte_offset
- & (slotinfo[cnt].map->l_tls_align - 1));
+ size_t firstbyte = (-l->l_tls_firstbyte_offset
+ & (l->l_tls_align - 1));
size_t off;
- max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
+ max_align = MAX (max_align, l->l_tls_align);
- if (slotinfo[cnt].map->l_tls_blocksize <= freetop - freebottom)
+ if (l->l_tls_blocksize <= freetop - freebottom)
{
- off = roundup (freebottom, slotinfo[cnt].map->l_tls_align);
+ off = roundup (freebottom, l->l_tls_align);
if (off - freebottom < firstbyte)
- off += slotinfo[cnt].map->l_tls_align;
- if (off + slotinfo[cnt].map->l_tls_blocksize - firstbyte <= freetop)
+ off += l->l_tls_align;
+ if (off + l->l_tls_blocksize - firstbyte <= freetop)
{
- slotinfo[cnt].map->l_tls_offset = off - firstbyte;
- freebottom = (off + slotinfo[cnt].map->l_tls_blocksize
+ l->l_tls_offset = off - firstbyte;
+ freebottom = (off + l->l_tls_blocksize
- firstbyte);
continue;
}
}
- off = roundup (offset, slotinfo[cnt].map->l_tls_align);
+ off = roundup (offset, l->l_tls_align);
if (off - offset < firstbyte)
- off += slotinfo[cnt].map->l_tls_align;
+ off += l->l_tls_align;
- slotinfo[cnt].map->l_tls_offset = off - firstbyte;
+ l->l_tls_offset = off - firstbyte;
if (off - firstbyte - offset > freetop - freebottom)
{
freebottom = offset;
freetop = off - firstbyte;
}
- offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
+ offset = off + l->l_tls_blocksize - firstbyte;
}
/* Insert the extra TLS block after the last TLS block. */
@@ -1157,7 +1153,8 @@ _dl_tls_initial_modid_limit_setup (void)
{
struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
size_t idx;
- for (idx = 0; idx < listp->len; ++idx)
+ /* Start with 1 because TLS module ID zero is unused. */
+ for (idx = 1; idx < listp->len; ++idx)
{
struct link_map *l = listp->slotinfo[idx].map;
if (l == NULL
diff --git a/elf/rtld.c b/elf/rtld.c
index f32058b..00bec15 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -752,26 +752,9 @@ init_tls (size_t naudit)
/* No need to check the return value. If memory allocation failed
the program would have been terminated. */
- struct dtv_slotinfo *slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
GL(dl_tls_dtv_slotinfo_list)->len = nelem;
GL(dl_tls_dtv_slotinfo_list)->next = NULL;
- /* Fill in the information from the loaded modules. No namespace
- but the base one can be filled at this time. */
- assert (GL(dl_ns)[LM_ID_BASE + 1]._ns_loaded == NULL);
- int i = 0;
- for (struct link_map *l = GL(dl_ns)[LM_ID_BASE]._ns_loaded; l != NULL;
- l = l->l_next)
- if (l->l_tls_blocksize != 0)
- {
- /* This is a module with TLS data. Store the map reference.
- The generation counter is zero. */
- slotinfo[i].map = l;
- /* slotinfo[i].gen = 0; */
- ++i;
- }
- assert (i == GL(dl_tls_max_dtv_idx));
-
/* Calculate the size of the static TLS surplus. */
_dl_tls_static_surplus_init (naudit);
@@ -788,8 +771,6 @@ init_tls (size_t naudit)
_dl_fatal_printf ("\
cannot allocate TLS data structures for initial thread\n");
- _dl_tls_initial_modid_limit_setup ();
-
/* Store for detection of the special case by __tls_get_addr
so it knows not to pass this dtv to the normal realloc. */
GL(dl_initial_dtv) = GET_DTV (tcbp);
@@ -2293,6 +2274,10 @@ dl_main (const ElfW(Phdr) *phdr,
}
rtld_timer_stop (&relocate_time, start);
+ /* This call must come after the slotinfo array has been filled in
+ using _dl_add_to_slotinfo. */
+ _dl_tls_initial_modid_limit_setup ();
+
/* Now enable profiling if needed. Like the previous call,
this has to go here because the calls it makes should use the
rtld versions of the functions (particularly calloc()), but it