Diffstat (limited to 'elf')
-rw-r--r--  elf/dl-load.c     2
-rw-r--r--  elf/dl-minimal.c  22
-rw-r--r--  elf/rtld.c        22
3 files changed, 28 insertions, 18 deletions
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 2955bc5..aabbf21 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -949,7 +949,7 @@ _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
l->l_tls_initimage_size = ph->p_filesz;
/* Since we don't know the load address yet only store the
offset. We will adjust it later. */
- l->l_tls_initimage = (void *) ph->p_offset;
+ l->l_tls_initimage = (void *) ph->p_vaddr;
/* Assign the next available module ID. */
l->l_tls_modid = _dl_next_tls_modid ();
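
Note: the stored value changes from the ELF file offset (p_offset) to the link-time virtual address (p_vaddr). Once the object is mapped, the TLS initialization image lives at l_addr + p_vaddr, not at l_addr + p_offset, so p_vaddr is the right placeholder to stash before the load address is known. A minimal sketch of the later fix-up, using a hypothetical helper name (in glibc the adjustment happens inline once l_addr is set):

    /* Sketch: after the load bias l_addr is known, convert the raw
       p_vaddr stashed in l_tls_initimage into a usable pointer.
       fixup_tls_initimage is a made-up name for illustration only.  */
    static void
    fixup_tls_initimage (struct link_map *l)
    {
      if (l->l_tls_initimage != NULL)
        l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
    }
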
diff --git a/elf/dl-minimal.c b/elf/dl-minimal.c
index 2e45fc0..893a3b1 100644
--- a/elf/dl-minimal.c
+++ b/elf/dl-minimal.c
@@ -46,8 +46,9 @@ extern unsigned long int weak_function strtoul (const char *nptr,
char **endptr, int base);
+/* Allocate an aligned memory block. */
void * weak_function
-malloc (size_t n)
+__libc_memalign (size_t align, size_t n)
{
#ifdef MAP_ANON
#define _dl_zerofd (-1)
@@ -70,8 +71,8 @@ malloc (size_t n)
}
/* Make sure the allocation pointer is ideally aligned. */
- alloc_ptr = (void *) 0 + (((alloc_ptr - (void *) 0) + sizeof (double) - 1)
- & ~(sizeof (double) - 1));
+ alloc_ptr = (void *) 0 + (((alloc_ptr - (void *) 0) + align - 1)
+ & ~(align - 1));
if (alloc_ptr + n >= alloc_end)
{
@@ -91,6 +92,12 @@ malloc (size_t n)
return alloc_last_block;
}
+void * weak_function
+malloc (size_t n)
+{
+ return __libc_memalign (sizeof (double), n);
+}
+
/* We use this function occasionally since the real implementation may
be optimized when it can assume the memory it returns already is
set to NUL. */
@@ -124,15 +131,6 @@ realloc (void *ptr, size_t n)
assert (new == ptr);
return new;
}
-
-/* Return alligned memory block. */
-void * weak_function
-__libc_memalign (size_t align, size_t n)
-{
- void *newp = malloc (n + align - 1);
-
- return (void *) roundup ((uintptr_t) newp, align);
-}
/* Avoid signal frobnication in setjmp/longjmp. Keeps things smaller. */
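
The dl-minimal.c change inverts the old relationship: __libc_memalign becomes the primary bump allocator and malloc is reduced to a wrapper requesting sizeof (double) alignment, which matches the previous behavior. The removed __libc_memalign had to over-allocate n + align - 1 bytes and round the returned pointer; the new version rounds the bump pointer itself with the usual power-of-two mask trick, wasting nothing. A self-contained sketch of that rounding (the patch does the arithmetic relative to (void *) 0 to stay in pointer types; uintptr_t is used here for clarity):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Round PTR up to the next multiple of ALIGN, which must be a
       power of two -- the same mask trick applied to alloc_ptr above.  */
    static void *
    align_up (void *ptr, size_t align)
    {
      assert (align != 0 && (align & (align - 1)) == 0);
      return (void *) (((uintptr_t) ptr + align - 1)
                       & ~((uintptr_t) align - 1));
    }
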
diff --git a/elf/rtld.c b/elf/rtld.c
index 82fe809..1472c18 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -191,7 +191,7 @@ _dl_start (void *arg)
assert (bootstrap_map.l_tls_blocksize != 0);
bootstrap_map.l_tls_initimage_size = phdr[cnt].p_filesz;
bootstrap_map.l_tls_initimage = (void *) (bootstrap_map.l_addr
- + phdr[cnt].p_offset);
+ + phdr[cnt].p_vaddr);
/* We can now allocate the initial TLS block. This can happen
on the stack. We'll get the final memory later when we
@@ -1087,13 +1087,10 @@ of this helper program; chances are you did not intend to run this program.\n\
for the thread descriptor. The memory for the TLS block will
never be freed. It should be allocated accordingly. The dtv
array can be changed if dynamic loading requires it. */
- tcbp = INTUSE(_dl_allocate_tls) ();
+ tcbp = _dl_allocate_tls_storage ();
if (tcbp == NULL)
_dl_fatal_printf ("\
cannot allocate TLS data structures for initial thread");
-
- /* And finally install it for the main thread. */
- TLS_INIT_TP (tcbp);
}
#endif
@@ -1445,6 +1442,21 @@ cannot allocate TLS data structures for initial thread");
we need it in the memory handling later. */
GL(dl_initial_searchlist) = *GL(dl_main_searchlist);
+#ifdef USE_TLS
+# ifndef SHARED
+ if (GL(dl_tls_max_dtv_idx) > 0)
+# endif
+ {
+ /* Now that we have completed relocation, the initializer data
+ for the TLS blocks has its final values and we can copy them
+ into the main thread's TLS area, which we allocated above. */
+ _dl_allocate_tls_init (tcbp);
+
+ /* And finally install it for the main thread. */
+ TLS_INIT_TP (tcbp);
+ }
+#endif
+
{
/* Initialize _r_debug. */
struct r_debug *r = _dl_debug_initialize (GL(dl_rtld_map).l_addr);
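
Taken together, the rtld.c hunks split TLS setup into two phases: _dl_allocate_tls_storage only reserves memory for the TCB and dtv early in startup, and only after relocation, when the init images hold their final values, does _dl_allocate_tls_init copy them into the main thread's block, with TLS_INIT_TP then installing it. A hedged sketch of the per-module copy that the init step amounts to (the real _dl_allocate_tls_init also manages the dtv; dest here stands for the module's slot in the new TLS block, and the field names follow struct link_map as used in the patch):

    #include <string.h>

    /* Sketch: copy the initialized portion of MAP's TLS template into
       DEST and zero the remainder (the .tbss part of the block).  */
    static void
    copy_tls_initimage (void *dest, const struct link_map *map)
    {
      memcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size);
      memset ((char *) dest + map->l_tls_initimage_size, '\0',
              map->l_tls_blocksize - map->l_tls_initimage_size);
    }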