author     Michael Jeanson <mjeanson@efficios.com>  2024-07-10 15:48:11 -0400
committer  Michael Jeanson <mjeanson@efficios.com>  2025-01-10 20:19:28 +0000
commit     0e411c5d3098982d67cd2d7a233eaa6c977a1869 (patch)
tree       e69eef11d9d92dc76c6115eef5512c3aae6878aa /elf/dl-tls.c
parent     c813c1490d5d8640a94fced10fc7674a48737b96 (diff)
Add generic 'extra TLS'
Add the logic to append an 'extra TLS' block in the TLS block allocator
with a generic stub implementation.  The code duplicated between
'csu/libc-tls.c' and 'elf/dl-tls.c' handles both statically linked
applications and the ELF dynamic loader.

Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Reviewed-by: Florian Weimer <fweimer@redhat.com>
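
The diff below reaches the new block through three hooks from
<dl-extra_tls.h>: '_dl_extra_tls_get_size ()', '_dl_extra_tls_get_align ()'
and '_dl_extra_tls_set_offset ()'.  A minimal sketch of a generic stub
consistent with those call sites (the signatures are inferred from this
diff; the header added by the commit may differ in detail):

#include <stddef.h>

/* Size of the extra TLS block; the generic stub reserves nothing.  */
static inline size_t
_dl_extra_tls_get_size (void)
{
  return 0;
}

/* Alignment requirement of the extra TLS block; 0 when unused.  */
static inline size_t
_dl_extra_tls_get_align (void)
{
  return 0;
}

/* Record the block's offset from the thread pointer; a no-op in the stub.  */
static inline void
_dl_extra_tls_set_offset (ptrdiff_t tls_offset)
{
}

On Linux, a non-stub implementation would report the size and alignment of
the rseq area, as the comments in the diff note.
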
Diffstat (limited to 'elf/dl-tls.c')
-rw-r--r--  elf/dl-tls.c  72
1 file changed, 72 insertions, 0 deletions
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index c2d1726..45ea058 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -36,6 +36,8 @@
#define TUNABLE_NAMESPACE rtld
#include <dl-tunables.h>
+#include <dl-extra_tls.h>
+
/* Surplus static TLS, GLRO(dl_tls_static_surplus), is used for
   - IE TLS in libc.so for all dlmopen namespaces except in the initial
@@ -323,6 +325,39 @@ _dl_determine_tlsoffset (void)
slotinfo[cnt].map->l_tls_offset = off;
}
+ /* Insert the extra TLS block after the last TLS block. */
+
+ /* An extra TLS block for internal use, appended at the end of the TLS blocks
+ (in allocation order). The address at which the block is allocated must
+ be aligned to 'extra_tls_align'. The size of the block as returned by
+ '_dl_extra_tls_get_size ()' is always a multiple of the alignment.
+
+ On Linux systems this is where the rseq area will be allocated. On other
+ systems it is currently unused and both values will be '0'. */
+ size_t extra_tls_size = _dl_extra_tls_get_size ();
+ size_t extra_tls_align = _dl_extra_tls_get_align ();
+
+ /* Increase the maximum alignment with the extra TLS alignment requirements
+ if necessary. */
+ max_align = MAX (max_align, extra_tls_align);
+
+ /* Add the extra TLS block to the global offset. To ensure proper alignment,
+ first align the current global offset to the extra TLS block requirements
+ and then add the extra TLS block size. Both values respect the extra TLS
+ alignment requirements and so does the resulting offset. */
+ offset = roundup (offset, extra_tls_align ?: 1) + extra_tls_size;
+
+ /* Record the extra TLS offset.
+
+ With TLS_TCB_AT_TP the TLS blocks are allocated before the thread pointer
+ in reverse order. Our block is added last, which results in it being the
+ first in the static TLS block, thus record the most negative offset.
+
+ The alignment requirements of the pointer resulting from this offset and
+ the thread pointer are enforced by 'max_align', which is used to align the
+ tcb_offset. */
+ _dl_extra_tls_set_offset (-offset);
+
GL(dl_tls_static_used) = offset;
GLRO (dl_tls_static_size) = (roundup (offset + GLRO(dl_tls_static_surplus),
                                      max_align)
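
The TLS_TCB_AT_TP hunk above can be exercised with a standalone sketch.
The running 'offset' counts bytes allocated below the thread pointer, so
after aligning up and appending the block, the recorded offset is the most
negative one.  All sizes here are hypothetical, and 'roundup' is defined
locally to match its usual <sys/param.h> definition:

#include <stdio.h>
#include <stddef.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int
main (void)
{
  size_t offset = 200;          /* Bytes used by ordinary TLS blocks.  */
  size_t max_align = 16;
  size_t extra_tls_size = 32;   /* Multiple of the alignment, per the diff.  */
  size_t extra_tls_align = 32;

  /* The extra block may raise the alignment of the whole static TLS area.  */
  max_align = MAX (max_align, extra_tls_align);

  /* Align up, then append the block; '?: 1' (GNU C, as in the diff) guards
     against the zero alignment reported by the generic stub.  */
  offset = roundup (offset, extra_tls_align ?: 1) + extra_tls_size;

  /* Blocks grow away from the thread pointer, so the block lives at
     TP - offset; the recorded offset is the most negative one.  */
  printf ("max_align = %zu, offset = %zu, recorded offset = %td\n",
          max_align, offset, -(ptrdiff_t) offset);
  return 0;
}

With these numbers the offset aligns from 200 up to 224 and the block
occupies [TP - 256, TP - 224), so -256 is recorded.
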
@@ -368,6 +403,43 @@ _dl_determine_tlsoffset (void)
offset = off + slotinfo[cnt].map->l_tls_blocksize - firstbyte;
}
+ /* Insert the extra TLS block after the last TLS block. */
+
+ /* An extra TLS block for internal use, appended at the end of the TLS blocks
+ (in allocation order). The address at which the block is allocated must
+ be aligned to 'extra_tls_align'. The size of the block as returned by
+ '_dl_extra_tls_get_size ()' is always a multiple of the alignment.
+
+ On Linux systems this is where the rseq area will be allocated. On other
+ systems it is currently unused and both values will be '0'. */
+ size_t extra_tls_size = _dl_extra_tls_get_size ();
+ size_t extra_tls_align = _dl_extra_tls_get_align ();
+
+ /* Increase the maximum alignment with the extra TLS alignment requirements
+ if necessary. */
+ max_align = MAX (max_align, extra_tls_align);
+
+ /* Align the global offset to the beginning of the extra TLS block. */
+ offset = roundup (offset, extra_tls_align ?: 1);
+
+ /* Record the extra TLS offset.
+
+ With TLS_DTV_AT_TP the TLS blocks are allocated after the thread pointer in
+ order. Our block is added last, which results in it being the last in the
+ static TLS block, thus record the offset as the size of the static TLS
+ block minus the size of our block.
+
+ On some architectures the TLS blocks are offset from the thread pointer;
+ include this offset in the extra TLS block offset.
+
+ The alignment requirements of the pointer resulting from this offset and
+ the thread pointer are enforced by 'max_align', which is used to align the
+ tcb_offset. */
+ _dl_extra_tls_set_offset (offset - TLS_TP_OFFSET);
+
+ /* Add the extra TLS block to the global offset. */
+ offset += extra_tls_size;
+
GL(dl_tls_static_used) = offset;
GLRO (dl_tls_static_size) = roundup (offset + GLRO(dl_tls_static_surplus),
                                     TCB_ALIGNMENT);
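
The TLS_DTV_AT_TP hunk above can be sketched the same way.  Here the offset
is recorded before the block's size is added, and it is biased by
TLS_TP_OFFSET on architectures whose thread pointer does not point at the
start of the TLS area.  The sizes and the bias below are hypothetical:

#include <stdio.h>
#include <stddef.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define TLS_TP_OFFSET 0x7000   /* Hypothetical thread-pointer bias.  */

int
main (void)
{
  size_t offset = 200;          /* Bytes used by ordinary TLS blocks.  */
  size_t extra_tls_size = 32;   /* Multiple of the alignment, per the diff.  */
  size_t extra_tls_align = 32;

  /* Align up to the start of the extra block...  */
  offset = roundup (offset, extra_tls_align ?: 1);

  /* ...record its distance from the biased thread pointer...  */
  ptrdiff_t recorded = (ptrdiff_t) offset - TLS_TP_OFFSET;

  /* ...and only then account for the block itself.  */
  offset += extra_tls_size;

  printf ("recorded offset = %td, offset after block = %zu\n",
          recorded, offset);
  return 0;
}

With a zero bias the recorded offset would simply be 224, the aligned start
of the block after the ordinary TLS blocks.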