author     Ulrich Drepper <drepper@redhat.com>    2008-05-13 05:41:30 +0000
committer  Ulrich Drepper <drepper@redhat.com>    2008-05-13 05:41:30 +0000
commit     c9ff0187a63b6bc535c9febd2c2eb1c5a843b2c1 (patch)
tree       4d30dab17b7e46a98332aafbe886e62aa78228de /elf
parent     a3636e8b89a7c565b1141d56055a6a07a053792c (diff)
Introduce TLS descriptors for i386 and x86_64.
* include/inline-hashtab.h: New file, copied from 2005's
libiberty, with fix for memory leak imported afterwards by
Glauber de Oliveira Costa.
* elf/tlsdeschtab.h: New file.
* elf/dl-reloc.c (_dl_try_allocate_static_tls): Extract from...
(_dl_allocate_static_tls): ... here. Rearrange failure path.
(CHECK_STATIC_TLS): Move to...
* elf/dynamic-link.h: ... this file.
(TRY_STATIC_TLS): New macro.
* elf/dl-conflict.c (CHECK_STATIC_TLS, TRY_STATIC_TLS): Override.
* elf/elf.h (R_386_TLS_GOTDESC, R_386_TLS_DESC_CALL,
R_386_TLS_DESC): Define.
(R_X86_64_PC64, R_X86_64_GOTOFF64, R_X86_64_GOTPC32): Merge from
binutils.
(R_X86_64_GOTPC32_TLSDESC, R_X86_64_TLSDESC_CALL,
R_X86_64_TLSDESC): Define.
(R_386_NUM, R_X86_64_NUM): Adjust.
* sysdeps/i386/Makefile (sysdep-dl-routines, sysdep_routines,
sysdep-rtld-routines): Add tlsdesc and dl-tlsdesc for elf subdir.
(gen-as-const-headers): Add tlsdesc.sym to csu subdir.
* sysdeps/i386/dl-lookupcfg.h: New file. Introduce _dl_unmap to
release tlsdesc_table.
* sysdeps/i386/dl-machine.h: Include dl-tlsdesc.h.
(elf_machine_type_class): Mark R_386_TLS_DESC as PLT class.
(elf_machine_rel): Handle R_386_TLS_DESC.
(elf_machine_rela): Likewise.
(elf_machine_lazy_rel): Likewise.
(elf_machine_lazy_rela): Likewise.
* sysdeps/i386/dl-tls.h (struct dl_tls_index): Name it.
* sysdeps/i386/dl-tlsdesc.S: New file.
* sysdeps/i386/dl-tlsdesc.h: New file.
* sysdeps/i386/tlsdesc.c: New file.
* sysdeps/i386/tlsdesc.sym: New file.
* sysdeps/i386/bits/linkmap.h (struct link_map_machine): Add
tlsdesc_table.
* sysdeps/x86_64/Makefile (sysdep-dl-routines, sysdep_routines,
sysdep-rtld-routines): Add tlsdesc and dl-tlsdesc for elf subdir.
(gen-as-const-headers): Add tlsdesc.sym to csu subdir.
* sysdeps/x86_64/dl-lookupcfg.h: New file. Introduce _dl_unmap to
release tlsdesc_table.
* sysdeps/x86_64/dl-machine.h: Include dl-tlsdesc.h.
(elf_machine_runtime_setup): Set up lazy TLSDESC GOT entry.
(elf_machine_type_class): Mark R_X86_64_TLSDESC as PLT class.
(elf_machine_rel): Handle R_X86_64_TLSDESC.
(elf_machine_rela): Likewise.
(elf_machine_lazy_rel): Likewise.
* sysdeps/x86_64/dl-tls.h (struct dl_tls_index): Name it.
(__tls_get_addr): Do not declare for non-shared compiles.
* sysdeps/x86_64/dl-tlsdesc.S: New file.
* sysdeps/x86_64/dl-tlsdesc.h: New file.
* sysdeps/x86_64/tlsdesc.c: New file.
* sysdeps/x86_64/tlsdesc.sym: New file.
* sysdeps/x86_64/bits/linkmap.h (struct link_map_machine): Add
tlsdesc_table for both 32- and 64-bit structs.
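For orientation, here is a minimal sketch of the data a TLS descriptor carries.  The names tlsinfo, gen_count and entry appear in the new elf/tlsdeschtab.h shown in the diff below; the arg member, the ti_module/ti_offset pair and the entry-point signature are assumptions based on the usual TLSDESC convention rather than anything confirmed on this page.  A GOT slot relocated with R_386_TLS_DESC or R_X86_64_TLSDESC holds such a descriptor, and the compiler-generated access sequence calls td->entry to obtain the variable's offset from the thread pointer.

#include <stddef.h>

/* Module/offset pair; sketch of glibc's struct dl_tls_index, which this
   commit names in sysdeps/*/dl-tls.h.  */
typedef struct
{
  size_t ti_module;             /* Module ID of the defining object.  */
  size_t ti_offset;             /* Offset inside that module's TLS block.  */
} tls_index;

/* One GOT entry's worth of descriptor: a resolver entry point plus an
   argument it interprets (a static TP offset, or a pointer to the
   dynamic argument below).  */
struct tlsdesc
{
  ptrdiff_t (*entry) (struct tlsdesc *td);
  void *arg;
};

/* Argument used when the variable cannot be given a static TLS offset;
   elf/tlsdeschtab.h hashes these so equal (module, offset) pairs share
   one allocation.  */
struct tlsdesc_dynamic_arg
{
  tls_index tlsinfo;            /* Which module and offset to resolve.  */
  size_t gen_count;             /* DTV generation the entry was made for.  */
};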
Diffstat (limited to 'elf')
-rw-r--r--   elf/dl-conflict.c     9
-rw-r--r--   elf/dl-reloc.c       42
-rw-r--r--   elf/dynamic-link.h   27
-rw-r--r--   elf/elf.h            22
-rw-r--r--   elf/tlsdeschtab.h   157
5 files changed, 229 insertions, 28 deletions
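The dl-reloc.c and dynamic-link.h changes in the diff below rework the static-TLS check so that TLS descriptor relocations can fall back to dynamic TLS instead of failing hard.  The following sketch restates the two policies with simplified stand-ins for struct link_map and the allocation helpers; all _sketch names are hypothetical, while the real macros are CHECK_STATIC_TLS and TRY_STATIC_TLS in elf/dynamic-link.h and the helper is _dl_try_allocate_static_tls in elf/dl-reloc.c.

#include <stdbool.h>

/* Simplified stand-ins for the real link_map field and sentinel values.  */
#define SKETCH_NO_TLS_OFFSET              0
#define SKETCH_FORCED_DYNAMIC_TLS_OFFSET -1

struct map_sketch { long l_tls_offset; };

/* Mirrors _dl_try_allocate_static_tls: 0 if a static offset was (or had
   already been) assigned, -1 if the surplus static TLS area is full.  */
extern int try_allocate_static_tls_sketch (struct map_sketch *map);

/* Mirrors _dl_allocate_static_tls: like the above, but failure is fatal
   (the "cannot allocate memory in static TLS block" error).  */
extern void allocate_static_tls_or_die_sketch (struct map_sketch *map);

/* CHECK_STATIC_TLS policy: the relocation only works against the static
   TLS block, so if no offset exists yet one must be created or the
   relocation fails.  */
static void
check_static_tls_sketch (struct map_sketch *sym_map)
{
  if (sym_map->l_tls_offset == SKETCH_NO_TLS_OFFSET
      || sym_map->l_tls_offset == SKETCH_FORCED_DYNAMIC_TLS_OFFSET)
    allocate_static_tls_or_die_sketch (sym_map);
}

/* TRY_STATIC_TLS policy: report whether a static offset is available or
   can still be carved out; a TLSDESC relocation that gets `false' here
   falls back to _dl_make_tlsdesc_dynamic instead of failing.  */
static bool
try_static_tls_sketch (struct map_sketch *sym_map)
{
  return sym_map->l_tls_offset != SKETCH_FORCED_DYNAMIC_TLS_OFFSET
         && (sym_map->l_tls_offset != SKETCH_NO_TLS_OFFSET
             || try_allocate_static_tls_sketch (sym_map) == 0);
}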
diff --git a/elf/dl-conflict.c b/elf/dl-conflict.c
index 9b49e77..b730105 100644
--- a/elf/dl-conflict.c
+++ b/elf/dl-conflict.c
@@ -1,5 +1,5 @@
 /* Resolve conflicts against already prelinked libraries.
-   Copyright (C) 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Jakub Jelinek <jakub@redhat.com>, 2001.
 
@@ -44,7 +44,6 @@ _dl_resolve_conflicts (struct link_map *l, ElfW(Rela) *conflict,
 /* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code.  */
 #define RESOLVE_MAP(ref, version, flags) (*ref = NULL, NULL)
 #define RESOLVE(ref, version, flags) (*ref = NULL, 0)
-#define CHECK_STATIC_TLS(ref_map, sym_map) ((void) 0)
 #define RESOLVE_CONFLICT_FIND_MAP(map, r_offset) \
   do { \
     while ((resolve_conflict_map->l_map_end < (ElfW(Addr)) (r_offset)) \
@@ -61,6 +60,12 @@ _dl_resolve_conflicts (struct link_map *l, ElfW(Rela) *conflict,
 
 #include "dynamic-link.h"
 
+  /* Override these, defined in dynamic-link.h.  */
+#undef CHECK_STATIC_TLS
+#define CHECK_STATIC_TLS(ref_map, sym_map) ((void) 0)
+#undef TRY_STATIC_TLS
+#define TRY_STATIC_TLS(ref_map, sym_map) (0)
+
   GL(dl_num_cache_relocations) += conflictend - conflict;
 
   for (; conflict < conflictend; ++conflict)
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index e9784c2..a303cb4 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -1,5 +1,5 @@
 /* Relocate a shared object and resolve its references to other loaded objects.
-   Copyright (C) 1995-2004, 2005, 2006 Free Software Foundation, Inc.
+   Copyright (C) 1995-2004, 2005, 2006, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -43,9 +43,9 @@
    This function intentionally does not return any value but signals
    error directly, as static TLS should be rare and code handling it
    should not be inlined as much as possible.  */
-void
-internal_function __attribute_noinline__
-_dl_allocate_static_tls (struct link_map *map)
+int
+internal_function
+_dl_try_allocate_static_tls (struct link_map *map)
 {
   /* If we've already used the variable with dynamic access, or if the
      alignment requirements are too high, fail.  */
@@ -53,8 +53,7 @@ _dl_allocate_static_tls (struct link_map *map)
       || map->l_tls_align > GL(dl_tls_static_align))
     {
     fail:
-      _dl_signal_error (0, map->l_name, NULL, N_("\
-cannot allocate memory in static TLS block"));
+      return -1;
     }
 
 #if TLS_TCB_AT_TP
@@ -108,6 +107,20 @@ cannot allocate memory in static TLS block"));
     }
   else
     map->l_need_tls_init = 1;
+
+  return 0;
+}
+
+void
+internal_function __attribute_noinline__
+_dl_allocate_static_tls (struct link_map *map)
+{
+  if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+      || _dl_try_allocate_static_tls (map))
+    {
+      _dl_signal_error (0, map->l_name, NULL, N_("\
+cannot allocate memory in static TLS block"));
+    }
 }
 
 /* Initialize static TLS area and DTV for current (only) thread.
@@ -248,23 +261,6 @@ _dl_relocate_object (struct link_map *l, struct r_scope_elem *scope[],
		     l->l_lookup_cache.value = _lr; }))		\
      : l)
 
-  /* This macro is used as a callback from elf_machine_rel{a,} when a
-     static TLS reloc is about to be performed.  Since (in dl-load.c) we
-     permit dynamic loading of objects that might use such relocs, we
-     have to check whether each use is actually doable.  If the object
-     whose TLS segment the reference resolves to was allocated space in
-     the static TLS block at startup, then it's ok.  Otherwise, we make
-     an attempt to allocate it in surplus space on the fly.  If that
-     can't be done, we fall back to the error that DF_STATIC_TLS is
-     intended to produce.  */
-#define CHECK_STATIC_TLS(map, sym_map)					\
-    do {								\
-      if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET	\
-			    || ((sym_map)->l_tls_offset			\
-				== FORCED_DYNAMIC_TLS_OFFSET), 0))	\
-	_dl_allocate_static_tls (sym_map);				\
-    } while (0)
-
 #include "dynamic-link.h"
 
   ELF_DYNAMIC_RELOCATE (l, lazy, consider_profiling);
diff --git a/elf/dynamic-link.h b/elf/dynamic-link.h
index 7eb9a36..c34cbcd 100644
--- a/elf/dynamic-link.h
+++ b/elf/dynamic-link.h
@@ -1,5 +1,5 @@
 /* Inline functions for dynamic linking.
-   Copyright (C) 1995-2005, 2006 Free Software Foundation, Inc.
+   Copyright (C) 1995-2005, 2006, 2008 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -17,6 +17,31 @@
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+/* This macro is used as a callback from elf_machine_rel{a,} when a
+   static TLS reloc is about to be performed.  Since (in dl-load.c) we
+   permit dynamic loading of objects that might use such relocs, we
+   have to check whether each use is actually doable.  If the object
+   whose TLS segment the reference resolves to was allocated space in
+   the static TLS block at startup, then it's ok.  Otherwise, we make
+   an attempt to allocate it in surplus space on the fly.  If that
+   can't be done, we fall back to the error that DF_STATIC_TLS is
+   intended to produce.  */
+#define CHECK_STATIC_TLS(map, sym_map)					\
+    do {								\
+      if (__builtin_expect ((sym_map)->l_tls_offset == NO_TLS_OFFSET	\
+			    || ((sym_map)->l_tls_offset			\
+				== FORCED_DYNAMIC_TLS_OFFSET), 0))	\
+	_dl_allocate_static_tls (sym_map);				\
+    } while (0)
+
+#define TRY_STATIC_TLS(map, sym_map)					\
+    (__builtin_expect ((sym_map)->l_tls_offset				\
+		       != FORCED_DYNAMIC_TLS_OFFSET, 1)			\
+     && (__builtin_expect ((sym_map)->l_tls_offset != NO_TLS_OFFSET, 1) \
+	 || _dl_try_allocate_static_tls (sym_map) == 0))
+
+int internal_function _dl_try_allocate_static_tls (struct link_map *map);
+
 #include <elf.h>
 #include <assert.h>
 
diff --git a/elf/elf.h b/elf/elf.h
@@ -1159,8 +1159,17 @@ typedef struct
 #define R_386_TLS_DTPMOD32 35		/* ID of module containing symbol */
 #define R_386_TLS_DTPOFF32 36		/* Offset in TLS block */
 #define R_386_TLS_TPOFF32  37		/* Negated offset in static TLS block */
+/* 38? */
+#define R_386_TLS_GOTDESC  39		/* GOT offset for TLS descriptor. */
+#define R_386_TLS_DESC_CALL 40		/* Marker of call through TLS
+					   descriptor for
+					   relaxation.  */
+#define R_386_TLS_DESC     41		/* TLS descriptor containing
+					   pointer to code and to
+					   argument, returning the TLS
+					   offset for the symbol.  */
 /* Keep this the last entry.  */
-#define R_386_NUM	   38
+#define R_386_NUM	   42
 
 /* SUN SPARC specific definitions.  */
 
@@ -2557,8 +2566,17 @@ typedef Elf32_Addr Elf32_Conflict;
 #define R_X86_64_GOTTPOFF	22	/* 32 bit signed PC relative offset
					   to GOT entry for IE symbol */
 #define R_X86_64_TPOFF32	23	/* Offset in initial TLS block */
+#define R_X86_64_PC64		24	/* PC relative 64 bit */
+#define R_X86_64_GOTOFF64	25	/* 64 bit offset to GOT */
+#define R_X86_64_GOTPC32	26	/* 32 bit signed pc relative
+					   offset to GOT */
+/* 27 .. 33 */
+#define R_X86_64_GOTPC32_TLSDESC 34	/* GOT offset for TLS descriptor. */
+#define R_X86_64_TLSDESC_CALL	35	/* Marker for call through TLS
+					   descriptor. */
+#define R_X86_64_TLSDESC	36	/* TLS descriptor. */
 
-#define R_X86_64_NUM		24
+#define R_X86_64_NUM		37
 
 /* AM33 relocations.  */
 
diff --git a/elf/tlsdeschtab.h b/elf/tlsdeschtab.h
new file mode 100644
index 0000000..c3cbc3f
--- /dev/null
+++ b/elf/tlsdeschtab.h
@@ -0,0 +1,157 @@
+/* Hash table for TLS descriptors.
+   Copyright (C) 2005, 2008 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Alexandre Oliva <aoliva@redhat.com>
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#ifndef TLSDESCHTAB_H
+# define TLSDESCHTAB_H 1
+
+# ifdef SHARED
+
+# include <inline-hashtab.h>
+
+inline static int
+hash_tlsdesc (void *p)
+{
+  struct tlsdesc_dynamic_arg *td = p;
+
+  /* We know all entries are for the same module, so ti_offset is the
+     only distinguishing entry.  */
+  return td->tlsinfo.ti_offset;
+}
+
+inline static int
+eq_tlsdesc (void *p, void *q)
+{
+  struct tlsdesc_dynamic_arg *tdp = p, *tdq = q;
+
+  return tdp->tlsinfo.ti_offset == tdq->tlsinfo.ti_offset;
+}
+
+inline static int
+map_generation (struct link_map *map)
+{
+  size_t idx = map->l_tls_modid;
+  struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
+
+  /* Find the place in the dtv slotinfo list.  */
+  do
+    {
+      /* Does it fit in the array of this list element?  */
+      if (idx < listp->len)
+	{
+	  /* We should never get here for a module in static TLS, so
+	     we can assume that, if the generation count is zero, we
+	     still haven't determined the generation count for this
+	     module.  */
+	  if (listp->slotinfo[idx].gen)
+	    return listp->slotinfo[idx].gen;
+	  else
+	    break;
+	}
+      idx -= listp->len;
+      listp = listp->next;
+    }
+  while (listp != NULL);
+
+  /* If we get to this point, the module still hasn't been assigned an
+     entry in the dtv slotinfo data structures, and it will when we're
+     done with relocations.  At that point, the module will get a
+     generation number that is one past the current generation, so
+     return exactly that.  */
+  return GL(dl_tls_generation) + 1;
+}
+
+void *
+internal_function
+_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset)
+{
+  struct hashtab *ht;
+  void **entry;
+  struct tlsdesc_dynamic_arg *td, test;
+
+  /* FIXME: We could use a per-map lock here, but is it worth it?  */
+  __rtld_lock_lock_recursive (GL(dl_load_lock));
+
+  ht = map->l_mach.tlsdesc_table;
+  if (! ht)
+    {
+      ht = htab_create ();
+      if (! ht)
+	{
+	  __rtld_lock_unlock_recursive (GL(dl_load_lock));
+	  return 0;
+	}
+      map->l_mach.tlsdesc_table = ht;
+    }
+
+  test.tlsinfo.ti_module = map->l_tls_modid;
+  test.tlsinfo.ti_offset = ti_offset;
+  entry = htab_find_slot (ht, &test, 1, hash_tlsdesc, eq_tlsdesc);
+  if (*entry)
+    {
+      td = *entry;
+      __rtld_lock_unlock_recursive (GL(dl_load_lock));
+      return td;
+    }
+
+  *entry = td = malloc (sizeof (struct tlsdesc_dynamic_arg));
+  /* This may be higher than the map's generation, but it doesn't
+     matter much.  Worst case, we'll have one extra DTV update per
+     thread.  */
+  td->gen_count = map_generation (map);
+  td->tlsinfo = test.tlsinfo;
+
+  __rtld_lock_unlock_recursive (GL(dl_load_lock));
+  return td;
+}
+
+# endif /* SHARED */
+
+/* The idea of the following two functions is to stop multiple threads
+   from attempting to resolve the same TLS descriptor without busy
+   waiting.  Ideally, we should be able to release the lock right
+   after changing td->entry, and then using say a condition variable
+   or a futex wake to wake up any waiting threads, but let's try to
+   avoid introducing such dependencies.  */
+
+inline static int
+_dl_tlsdesc_resolve_early_return_p (struct tlsdesc volatile *td, void *caller)
+{
+  if (caller != td->entry)
+    return 1;
+
+  __rtld_lock_lock_recursive (GL(dl_load_lock));
+  if (caller != td->entry)
+    {
+      __rtld_lock_unlock_recursive (GL(dl_load_lock));
+      return 1;
+    }
+
+  td->entry = _dl_tlsdesc_resolve_hold;
+
+  return 0;
+}
+
+inline static void
+_dl_tlsdesc_wake_up_held_fixups (void)
+{
+  __rtld_lock_unlock_recursive (GL(dl_load_lock));
+}
+
+#endif
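The last two functions in elf/tlsdeschtab.h serialize lazy resolution of one descriptor across threads: the first caller takes dl_load_lock and parks later callers in _dl_tlsdesc_resolve_hold; once the final entry point is stored, releasing the lock lets them retry.  Below is a rough sketch of how the per-architecture fixup code this commit adds in sysdeps/*/tlsdesc.c might use these helpers; the function name, the simplified struct layout and the overall flow are assumptions for illustration, not the actual glibc sources.

#include <stddef.h>

struct link_map;                                      /* opaque for the sketch */
struct tlsdesc { void (*entry) (void); void *arg; };  /* simplified layout */

/* Provided by the new headers and assembly files in the real tree.  */
extern void _dl_tlsdesc_resolve_hold (void);
extern int _dl_tlsdesc_resolve_early_return_p (struct tlsdesc volatile *td,
					       void *caller);
extern void _dl_tlsdesc_wake_up_held_fixups (void);
extern void *_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset);

/* Hypothetical stand-in for the per-arch lazy fixup routine.  */
void
tlsdesc_lazy_fixup_sketch (struct tlsdesc volatile *td, struct link_map *l,
			   void *caller)
{
  /* Someone else resolved, or is resolving, this descriptor: back out
     and let the caller retry through whatever td->entry points to now.
     Otherwise dl_load_lock is held on return and td->entry has been set
     to _dl_tlsdesc_resolve_hold so concurrent callers wait there.  */
  if (_dl_tlsdesc_resolve_early_return_p (td, caller))
    return;

  /* ... look up the symbol; store a static TP offset in td->arg, or
     point it at _dl_make_tlsdesc_dynamic (l, ti_offset) when static TLS
     is unavailable; then publish the matching final entry point in
     td->entry ...  */

  /* Drop dl_load_lock so threads parked in _dl_tlsdesc_resolve_hold
     re-read the now-final td->entry and complete their accesses.  */
  _dl_tlsdesc_wake_up_held_fixups ();
}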