Diffstat (limited to 'bfd/elflink.c')
-rw-r--r--  bfd/elflink.c | 466
1 file changed, 400 insertions(+), 66 deletions(-)
diff --git a/bfd/elflink.c b/bfd/elflink.c
index 4b20749..9d65b5f 100644
--- a/bfd/elflink.c
+++ b/bfd/elflink.c
@@ -240,12 +240,30 @@ _bfd_elf_link_create_dynamic_sections (bfd *abfd, struct bfd_link_info *info)
if (!_bfd_elf_define_linkage_sym (abfd, info, s, "_DYNAMIC"))
return FALSE;
- s = bfd_make_section_with_flags (abfd, ".hash",
- flags | SEC_READONLY);
- if (s == NULL
- || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
- return FALSE;
- elf_section_data (s)->this_hdr.sh_entsize = bed->s->sizeof_hash_entry;
+ if (info->emit_hash)
+ {
+ s = bfd_make_section_with_flags (abfd, ".hash", flags | SEC_READONLY);
+ if (s == NULL
+ || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
+ return FALSE;
+ elf_section_data (s)->this_hdr.sh_entsize = bed->s->sizeof_hash_entry;
+ }
+
+ if (info->emit_gnu_hash)
+ {
+ s = bfd_make_section_with_flags (abfd, ".gnu.hash",
+ flags | SEC_READONLY);
+ if (s == NULL
+ || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
+ return FALSE;
+ /* For 64-bit ELF, .gnu.hash is a section with a non-uniform entity size:
+ 4 32-bit words followed by a variable number of 64-bit words, then
+ a variable number of 32-bit words. */
+ if (bed->s->arch_size == 64)
+ elf_section_data (s)->this_hdr.sh_entsize = 0;
+ else
+ elf_section_data (s)->this_hdr.sh_entsize = 4;
+ }
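To make the entity-size remark concrete, here is a rough sketch of the shape the section ends up with; it is not part of the patch and the struct/field names are illustrative only. On ELFCLASS64 the bloom-filter words are 64-bit while everything else stays 32-bit, which is why no single sh_entsize fits.

    #include <stdint.h>

    /* Illustrative-only picture of a .gnu.hash image.  */
    struct gnu_hash_image
    {
      uint32_t nbuckets;     /* number of hash buckets */
      uint32_t symndx;       /* index of the first hashed .dynsym entry */
      uint32_t maskwords;    /* number of bloom-filter words */
      uint32_t shift2;       /* second bloom-filter shift */
      /* followed by: maskwords bloom-filter words (32- or 64-bit per class),
         nbuckets 32-bit bucket entries, and one 32-bit hash value per
         hashed dynamic symbol.  */
    };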
/* Let the backend create the rest of the sections. This lets the
backend set the right flags. The backend will normally create
@@ -4811,6 +4829,131 @@ elf_collect_hash_codes (struct elf_link_hash_entry *h, void *data)
return TRUE;
}
+struct collect_gnu_hash_codes
+{
+ bfd *output_bfd;
+ const struct elf_backend_data *bed;
+ unsigned long int nsyms;
+ unsigned long int maskbits;
+ unsigned long int *hashcodes;
+ unsigned long int *hashval;
+ unsigned long int *indx;
+ unsigned long int *counts;
+ bfd_vma *bitmask;
+ bfd_byte *contents;
+ long int min_dynindx;
+ unsigned long int bucketcount;
+ unsigned long int symindx;
+ long int local_indx;
+ long int shift1, shift2;
+ unsigned long int mask;
+};
+
+/* This function will be called through elf_link_hash_traverse to store
+ the hash values of all exported symbols in an array. */
+
+static bfd_boolean
+elf_collect_gnu_hash_codes (struct elf_link_hash_entry *h, void *data)
+{
+ struct collect_gnu_hash_codes *s = data;
+ const char *name;
+ char *p;
+ unsigned long ha;
+ char *alc = NULL;
+
+ if (h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ /* Ignore indirect symbols. These are added by the versioning code. */
+ if (h->dynindx == -1)
+ return TRUE;
+
+ /* Also ignore local symbols and undefined symbols. */
+ if (! (*s->bed->elf_hash_symbol) (h))
+ return TRUE;
+
+ name = h->root.root.string;
+ p = strchr (name, ELF_VER_CHR);
+ if (p != NULL)
+ {
+ alc = bfd_malloc (p - name + 1);
+ memcpy (alc, name, p - name);
+ alc[p - name] = '\0';
+ name = alc;
+ }
+
+ /* Compute the hash value. */
+ ha = bfd_elf_gnu_hash (name);
+
+ /* Store the found hash value in the array for compute_bucket_count,
+ and also for .dynsym reordering purposes. */
+ s->hashcodes[s->nsyms] = ha;
+ s->hashval[h->dynindx] = ha;
+ ++s->nsyms;
+ if (s->min_dynindx < 0 || s->min_dynindx > h->dynindx)
+ s->min_dynindx = h->dynindx;
+
+ if (alc != NULL)
+ free (alc);
+
+ return TRUE;
+}
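For reference, bfd_elf_gnu_hash used above is the DJB-style hash over the (unversioned) symbol name. A minimal standalone version looks roughly like the sketch below; it is not taken from the patch, only meant to show the recurrence.

    #include <stdint.h>

    /* Same recurrence as bfd_elf_gnu_hash: h = h * 33 + c, seeded with 5381,
       truncated to 32 bits.  */
    static uint32_t
    gnu_hash (const char *name)
    {
      uint32_t h = 5381;
      for (const unsigned char *p = (const unsigned char *) name; *p != '\0'; p++)
        h = (h << 5) + h + *p;
      return h;
    }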
+
+/* This function will be called through elf_link_hash_traverse to do
+ the final dynamic symbol renumbering. */
+
+static bfd_boolean
+elf_renumber_gnu_hash_syms (struct elf_link_hash_entry *h, void *data)
+{
+ struct collect_gnu_hash_codes *s = data;
+ unsigned long int bucket;
+ unsigned long int val;
+
+ if (h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ /* Ignore indirect symbols. */
+ if (h->dynindx == -1)
+ return TRUE;
+
+ /* Also ignore local symbols and undefined symbols. */
+ if (! (*s->bed->elf_hash_symbol) (h))
+ {
+ if (h->dynindx >= s->min_dynindx)
+ h->dynindx = s->local_indx++;
+ return TRUE;
+ }
+
+ bucket = s->hashval[h->dynindx] % s->bucketcount;
+ val = (s->hashval[h->dynindx] >> s->shift1)
+ & ((s->maskbits >> s->shift1) - 1);
+ s->bitmask[val] |= ((bfd_vma) 1) << (s->hashval[h->dynindx] & s->mask);
+ s->bitmask[val]
+ |= ((bfd_vma) 1) << ((s->hashval[h->dynindx] >> s->shift2) & s->mask);
+ val = s->hashval[h->dynindx] & ~(unsigned long int) 1;
+ if (s->counts[bucket] == 1)
+ /* Last element terminates the chain. */
+ val |= 1;
+ bfd_put_32 (s->output_bfd, val,
+ s->contents + (s->indx[bucket] - s->symindx) * 4);
+ --s->counts[bucket];
+ h->dynindx = s->indx[bucket]++;
+ return TRUE;
+}
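The two bitmask updates above set one bit per symbol at H mod C and one at (H >> shift2) mod C, where C is the word size in bits (1 << shift1). A consumer rejecting a name early would test the same two bits; a rough sketch for ELFCLASS64 (shift1 == 6), with illustrative names and not part of the patch:

    #include <stdint.h>

    /* H is the GNU hash of the name; bloom, maskwords, shift2 come from the
       .gnu.hash header.  maskwords is a power of two.  */
    static int
    maybe_present (uint32_t h, const uint64_t *bloom, uint32_t maskwords,
                   uint32_t shift2)
    {
      const unsigned shift1 = 6;                 /* 64-bit bloom words */
      uint64_t word = bloom[(h >> shift1) & (maskwords - 1)];
      uint64_t mask = ((uint64_t) 1 << (h & ((1u << shift1) - 1)))
                      | ((uint64_t) 1 << ((h >> shift2) & ((1u << shift1) - 1)));
      return (word & mask) == mask;   /* both bits set => symbol may exist */
    }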
+
+/* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
+
+bfd_boolean
+_bfd_elf_hash_symbol (struct elf_link_hash_entry *h)
+{
+ return !(h->forced_local
+ || h->root.type == bfd_link_hash_undefined
+ || h->root.type == bfd_link_hash_undefweak
+ || ((h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && h->root.u.def.section->output_section == NULL));
+}
+
/* Array used to determine the number of hash table buckets to use
based on the number of symbols there are. If there are fewer than
3 symbols we use 1 bucket, fewer than 17 symbols we use 3 buckets,
@@ -4832,42 +4975,26 @@ static const size_t elf_buckets[] =
Therefore the result is always a good payoff between few collisions
(= short chain lengths) and table size. */
static size_t
-compute_bucket_count (struct bfd_link_info *info)
+compute_bucket_count (struct bfd_link_info *info, unsigned long int *hashcodes,
+ unsigned long int nsyms, int gnu_hash)
{
size_t dynsymcount = elf_hash_table (info)->dynsymcount;
size_t best_size = 0;
- unsigned long int *hashcodes;
- unsigned long int *hashcodesp;
unsigned long int i;
bfd_size_type amt;
- /* Compute the hash values for all exported symbols. At the same
- time store the values in an array so that we could use them for
- optimizations. */
- amt = dynsymcount;
- amt *= sizeof (unsigned long int);
- hashcodes = bfd_malloc (amt);
- if (hashcodes == NULL)
- return 0;
- hashcodesp = hashcodes;
-
- /* Put all hash values in HASHCODES. */
- elf_link_hash_traverse (elf_hash_table (info),
- elf_collect_hash_codes, &hashcodesp);
-
/* We have a problem here. The following code to optimize the table
size requires an integer type with more than 32 bits. If
BFD_HOST_U_64_BIT is set we know about such a type. */
#ifdef BFD_HOST_U_64_BIT
if (info->optimize)
{
- unsigned long int nsyms = hashcodesp - hashcodes;
size_t minsize;
size_t maxsize;
BFD_HOST_U_64_BIT best_chlen = ~((BFD_HOST_U_64_BIT) 0);
- unsigned long int *counts ;
bfd *dynobj = elf_hash_table (info)->dynobj;
const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
+ unsigned long int *counts;
/* Possible optimization parameters: if we have NSYMS symbols we say
that the hashing table must at least have NSYMS/4 and at most
@@ -4876,6 +5003,13 @@ compute_bucket_count (struct bfd_link_info *info)
if (minsize == 0)
minsize = 1;
best_size = maxsize = nsyms * 2;
+ if (gnu_hash)
+ {
+ if (minsize < 2)
+ minsize = 2;
+ if ((best_size & 31) == 0)
+ ++best_size;
+ }
/* Create array where we count the collisions in. We must use bfd_malloc
since the size could be large. */
@@ -4883,10 +5017,7 @@ compute_bucket_count (struct bfd_link_info *info)
amt *= sizeof (unsigned long int);
counts = bfd_malloc (amt);
if (counts == NULL)
- {
- free (hashcodes);
- return 0;
- }
+ return 0;
/* Compute the "optimal" size for the hash table. The criteria is a
minimal chain length. The minor criteria is (of course) the size
@@ -4898,6 +5029,9 @@ compute_bucket_count (struct bfd_link_info *info)
unsigned long int j;
unsigned long int fact;
+ if (gnu_hash && (i & 31) == 0)
+ continue;
+
memset (counts, '\0', i * sizeof (unsigned long int));
/* Determine how often each hash bucket is used. */
@@ -4913,9 +5047,9 @@ compute_bucket_count (struct bfd_link_info *info)
# define BFD_TARGET_PAGESIZE (4096)
# endif
- /* We in any case need 2 + NSYMS entries for the size values and
- the chains. */
- max = (2 + nsyms) * (bed->s->arch_size / 8);
+ /* We in any case need 2 + DYNSYMCOUNT entries for the size values
+ and the chains. */
+ max = (2 + dynsymcount) * bed->s->sizeof_hash_entry;
# if 1
/* Variant 1: optimize for short chains. We add the squares
@@ -4925,7 +5059,7 @@ compute_bucket_count (struct bfd_link_info *info)
max += counts[j] * counts[j];
/* This adds penalties for the overall size of the table. */
- fact = i / (BFD_TARGET_PAGESIZE / (bed->s->arch_size / 8)) + 1;
+ fact = i / (BFD_TARGET_PAGESIZE / bed->s->sizeof_hash_entry) + 1;
max *= fact * fact;
# else
/* Variant 2: Optimize a lot more for small table. Here we
@@ -4936,7 +5070,7 @@ compute_bucket_count (struct bfd_link_info *info)
/* The overall size of the table is considered, but not as
strong as in variant 1, where it is squared. */
- fact = i / (BFD_TARGET_PAGESIZE / (bed->s->arch_size / 8)) + 1;
+ fact = i / (BFD_TARGET_PAGESIZE / bed->s->sizeof_hash_entry) + 1;
max *= fact;
# endif
@@ -4959,14 +5093,13 @@ compute_bucket_count (struct bfd_link_info *info)
for (i = 0; elf_buckets[i] != 0; i++)
{
best_size = elf_buckets[i];
- if (dynsymcount < elf_buckets[i + 1])
+ if (nsyms < elf_buckets[i + 1])
break;
}
+ if (gnu_hash && best_size < 2)
+ best_size = 2;
}
- /* Free the arrays we needed. */
- free (hashcodes);
-
return best_size;
}
@@ -5324,7 +5457,10 @@ bfd_elf_size_dynamic_sections (bfd *output_bfd,
bfd_size_type strsize;
strsize = _bfd_elf_strtab_size (elf_hash_table (info)->dynstr);
- if (!_bfd_elf_add_dynamic_entry (info, DT_HASH, 0)
+ if ((info->emit_hash
+ && !_bfd_elf_add_dynamic_entry (info, DT_HASH, 0))
+ || (info->emit_gnu_hash
+ && !_bfd_elf_add_dynamic_entry (info, DT_GNU_HASH, 0))
|| !_bfd_elf_add_dynamic_entry (info, DT_STRTAB, 0)
|| !_bfd_elf_add_dynamic_entry (info, DT_SYMTAB, 0)
|| !_bfd_elf_add_dynamic_entry (info, DT_STRSZ, strsize)
@@ -5726,8 +5862,6 @@ bfd_elf_size_dynsym_hash_dynstr (bfd *output_bfd, struct bfd_link_info *info)
asection *s;
bfd_size_type dynsymcount;
unsigned long section_sym_count;
- size_t bucketcount = 0;
- size_t hash_entry_size;
unsigned int dtagcount;
dynobj = elf_hash_table (info)->dynobj;
@@ -5778,23 +5912,215 @@ bfd_elf_size_dynsym_hash_dynstr (bfd *output_bfd, struct bfd_link_info *info)
memset (s->contents, 0, section_sym_count * bed->s->sizeof_sym);
}
+ elf_hash_table (info)->bucketcount = 0;
+
/* Compute the size of the hashing table. As a side effect this
computes the hash values for all the names we export. */
- bucketcount = compute_bucket_count (info);
+ if (info->emit_hash)
+ {
+ unsigned long int *hashcodes;
+ unsigned long int *hashcodesp;
+ bfd_size_type amt;
+ unsigned long int nsyms;
+ size_t bucketcount;
+ size_t hash_entry_size;
+
+ /* Compute the hash values for all exported symbols. At the same
+ time store the values in an array so that we could use them for
+ optimizations. */
+ amt = dynsymcount * sizeof (unsigned long int);
+ hashcodes = bfd_malloc (amt);
+ if (hashcodes == NULL)
+ return FALSE;
+ hashcodesp = hashcodes;
- s = bfd_get_section_by_name (dynobj, ".hash");
- BFD_ASSERT (s != NULL);
- hash_entry_size = elf_section_data (s)->this_hdr.sh_entsize;
- s->size = ((2 + bucketcount + dynsymcount) * hash_entry_size);
- s->contents = bfd_zalloc (output_bfd, s->size);
- if (s->contents == NULL)
- return FALSE;
+ /* Put all hash values in HASHCODES. */
+ elf_link_hash_traverse (elf_hash_table (info),
+ elf_collect_hash_codes, &hashcodesp);
+
+ nsyms = hashcodesp - hashcodes;
+ bucketcount
+ = compute_bucket_count (info, hashcodes, nsyms, 0);
+ free (hashcodes);
+
+ if (bucketcount == 0)
+ return FALSE;
- bfd_put (8 * hash_entry_size, output_bfd, bucketcount, s->contents);
- bfd_put (8 * hash_entry_size, output_bfd, dynsymcount,
- s->contents + hash_entry_size);
+ elf_hash_table (info)->bucketcount = bucketcount;
- elf_hash_table (info)->bucketcount = bucketcount;
+ s = bfd_get_section_by_name (dynobj, ".hash");
+ BFD_ASSERT (s != NULL);
+ hash_entry_size = elf_section_data (s)->this_hdr.sh_entsize;
+ s->size = ((2 + bucketcount + dynsymcount) * hash_entry_size);
+ s->contents = bfd_zalloc (output_bfd, s->size);
+ if (s->contents == NULL)
+ return FALSE;
+
+ bfd_put (8 * hash_entry_size, output_bfd, bucketcount, s->contents);
+ bfd_put (8 * hash_entry_size, output_bfd, dynsymcount,
+ s->contents + hash_entry_size);
+ }
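For orientation, the size computed above follows the classic SysV shape: 2 + nbucket + nchain hash words, with nchain equal to dynsymcount and the two header words written last. A small sketch with illustrative types (most targets use 4-byte hash entries; a few 64-bit ones use 8):

    #include <stdint.h>

    /* .hash is (2 + nbucket + nchain) hash words.  */
    struct sysv_hash_image
    {
      uint32_t nbucket;
      uint32_t nchain;
      /* followed by: uint32_t bucket[nbucket]; uint32_t chain[nchain]; */
    };
    /* e.g. 17 buckets and 100 dynamic symbols: (2 + 17 + 100) * 4 = 476 bytes.  */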
+
+ if (info->emit_gnu_hash)
+ {
+ size_t i, cnt;
+ unsigned char *contents;
+ struct collect_gnu_hash_codes cinfo;
+ bfd_size_type amt;
+ size_t bucketcount;
+
+ memset (&cinfo, 0, sizeof (cinfo));
+
+ /* Compute the hash values for all exported symbols. At the same
+ time store the values in an array so that we could use them for
+ optimizations. */
+ amt = dynsymcount * 2 * sizeof (unsigned long int);
+ cinfo.hashcodes = bfd_malloc (amt);
+ if (cinfo.hashcodes == NULL)
+ return FALSE;
+
+ cinfo.hashval = cinfo.hashcodes + dynsymcount;
+ cinfo.min_dynindx = -1;
+ cinfo.output_bfd = output_bfd;
+ cinfo.bed = bed;
+
+ /* Put all hash values in HASHCODES. */
+ elf_link_hash_traverse (elf_hash_table (info),
+ elf_collect_gnu_hash_codes, &cinfo);
+
+ bucketcount
+ = compute_bucket_count (info, cinfo.hashcodes, cinfo.nsyms, 1);
+
+ if (bucketcount == 0)
+ {
+ free (cinfo.hashcodes);
+ return FALSE;
+ }
+
+ s = bfd_get_section_by_name (dynobj, ".gnu.hash");
+ BFD_ASSERT (s != NULL);
+
+ if (cinfo.nsyms == 0)
+ {
+ /* Empty .gnu.hash section is special. */
+ BFD_ASSERT (cinfo.min_dynindx == -1);
+ free (cinfo.hashcodes);
+ s->size = 5 * 4 + bed->s->arch_size / 8;
+ contents = bfd_zalloc (output_bfd, s->size);
+ if (contents == NULL)
+ return FALSE;
+ s->contents = contents;
+ /* 1 empty bucket. */
+ bfd_put_32 (output_bfd, 1, contents);
+ /* SYMIDX above the special symbol 0. */
+ bfd_put_32 (output_bfd, 1, contents + 4);
+ /* Just one word for bitmask. */
+ bfd_put_32 (output_bfd, 1, contents + 8);
+ /* Only hash fn bloom filter. */
+ bfd_put_32 (output_bfd, 0, contents + 12);
+ /* No hashes are valid - empty bitmask. */
+ bfd_put (bed->s->arch_size, output_bfd, 0, contents + 16);
+ /* No hashes in the only bucket. */
+ bfd_put_32 (output_bfd, 0,
+ contents + 16 + bed->s->arch_size / 8);
+ }
+ else
+ {
+ unsigned long int maskwords, maskbitslog2;
+ BFD_ASSERT (cinfo.min_dynindx != -1);
+
+ maskbitslog2 = bfd_log2 (cinfo.nsyms) + 1;
+ if (maskbitslog2 < 3)
+ maskbitslog2 = 5;
+ else if ((1 << (maskbitslog2 - 2)) & cinfo.nsyms)
+ maskbitslog2 = maskbitslog2 + 3;
+ else
+ maskbitslog2 = maskbitslog2 + 2;
+ if (bed->s->arch_size == 64)
+ {
+ if (maskbitslog2 == 5)
+ maskbitslog2 = 6;
+ cinfo.shift1 = 6;
+ }
+ else
+ cinfo.shift1 = 5;
+ cinfo.mask = (1 << cinfo.shift1) - 1;
+ cinfo.shift2 = maskbitslog2 + cinfo.shift1;
+ cinfo.maskbits = 1 << maskbitslog2;
+ maskwords = 1 << (maskbitslog2 - cinfo.shift1);
+ amt = bucketcount * sizeof (unsigned long int) * 2;
+ amt += maskwords * sizeof (bfd_vma);
+ cinfo.bitmask = bfd_malloc (amt);
+ if (cinfo.bitmask == NULL)
+ {
+ free (cinfo.hashcodes);
+ return FALSE;
+ }
+
+ cinfo.counts = (void *) (cinfo.bitmask + maskwords);
+ cinfo.indx = cinfo.counts + bucketcount;
+ cinfo.symindx = dynsymcount - cinfo.nsyms;
+ memset (cinfo.bitmask, 0, maskwords * sizeof (bfd_vma));
+
+ /* Determine how often each hash bucket is used. */
+ memset (cinfo.counts, 0, bucketcount * sizeof (cinfo.counts[0]));
+ for (i = 0; i < cinfo.nsyms; ++i)
+ ++cinfo.counts[cinfo.hashcodes[i] % bucketcount];
+
+ for (i = 0, cnt = cinfo.symindx; i < bucketcount; ++i)
+ if (cinfo.counts[i] != 0)
+ {
+ cinfo.indx[i] = cnt;
+ cnt += cinfo.counts[i];
+ }
+ BFD_ASSERT (cnt == dynsymcount);
+ cinfo.bucketcount = bucketcount;
+ cinfo.local_indx = cinfo.min_dynindx;
+
+ s->size = (4 + bucketcount + cinfo.nsyms) * 4;
+ s->size += cinfo.maskbits / 8;
+ contents = bfd_zalloc (output_bfd, s->size);
+ if (contents == NULL)
+ {
+ free (cinfo.bitmask);
+ free (cinfo.hashcodes);
+ return FALSE;
+ }
+
+ s->contents = contents;
+ bfd_put_32 (output_bfd, bucketcount, contents);
+ bfd_put_32 (output_bfd, cinfo.symindx, contents + 4);
+ bfd_put_32 (output_bfd, maskwords, contents + 8);
+ bfd_put_32 (output_bfd, cinfo.shift2, contents + 12);
+ contents += 16 + cinfo.maskbits / 8;
+
+ for (i = 0; i < bucketcount; ++i)
+ {
+ if (cinfo.counts[i] == 0)
+ bfd_put_32 (output_bfd, 0, contents);
+ else
+ bfd_put_32 (output_bfd, cinfo.indx[i], contents);
+ contents += 4;
+ }
+
+ cinfo.contents = contents;
+
+ /* Renumber dynamic symbols, populate .gnu.hash section. */
+ elf_link_hash_traverse (elf_hash_table (info),
+ elf_renumber_gnu_hash_syms, &cinfo);
+
+ contents = s->contents + 16;
+ for (i = 0; i < maskwords; ++i)
+ {
+ bfd_put (bed->s->arch_size, output_bfd, cinfo.bitmask[i],
+ contents);
+ contents += bed->s->arch_size / 8;
+ }
+
+ free (cinfo.bitmask);
+ free (cinfo.hashcodes);
+ }
+ }
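Because elf_renumber_gnu_hash_syms has just made each bucket's symbols contiguous in .dynsym and marked the last hash value of a chain with bit 0, a lookup against the finished section walks a chain as sketched below. This is the consumer side only, not part of the patch; dynsym_name is a stand-in for fetching a dynamic symbol's name.

    #include <stdint.h>
    #include <string.h>

    extern const char *dynsym_name (uint32_t i);   /* illustrative stand-in */

    /* h is the GNU hash of NAME; nbuckets, symndx, buckets[] and chain[]
       are the values/arrays written above.  */
    static uint32_t
    gnu_hash_lookup (const char *name, uint32_t h, uint32_t nbuckets,
                     uint32_t symndx, const uint32_t *buckets,
                     const uint32_t *chain)
    {
      uint32_t i = buckets[h % nbuckets];
      if (i == 0)
        return 0;                        /* empty bucket */
      for (;; i++)
        {
          uint32_t hv = chain[i - symndx];
          /* Bit 0 is the chain terminator, so compare all other bits.  */
          if (((hv ^ h) & ~1u) == 0 && strcmp (dynsym_name (i), name) == 0)
            return i;
          if (hv & 1)
            return 0;                    /* end of this bucket's chain */
        }
    }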
s = bfd_get_section_by_name (dynobj, ".dynstr");
BFD_ASSERT (s != NULL);
@@ -6663,9 +6989,6 @@ elf_link_output_extsym (struct elf_link_hash_entry *h, void *data)
{
size_t bucketcount;
size_t bucket;
- size_t hash_entry_size;
- bfd_byte *bucketpos;
- bfd_vma chain;
bfd_byte *esym;
sym.st_name = h->dynstr_index;
@@ -6679,15 +7002,23 @@ elf_link_output_extsym (struct elf_link_hash_entry *h, void *data)
bucketcount = elf_hash_table (finfo->info)->bucketcount;
bucket = h->u.elf_hash_value % bucketcount;
- hash_entry_size
- = elf_section_data (finfo->hash_sec)->this_hdr.sh_entsize;
- bucketpos = ((bfd_byte *) finfo->hash_sec->contents
- + (bucket + 2) * hash_entry_size);
- chain = bfd_get (8 * hash_entry_size, finfo->output_bfd, bucketpos);
- bfd_put (8 * hash_entry_size, finfo->output_bfd, h->dynindx, bucketpos);
- bfd_put (8 * hash_entry_size, finfo->output_bfd, chain,
- ((bfd_byte *) finfo->hash_sec->contents
- + (bucketcount + 2 + h->dynindx) * hash_entry_size));
+
+ if (finfo->hash_sec != NULL)
+ {
+ size_t hash_entry_size;
+ bfd_byte *bucketpos;
+ bfd_vma chain;
+
+ hash_entry_size
+ = elf_section_data (finfo->hash_sec)->this_hdr.sh_entsize;
+ bucketpos = ((bfd_byte *) finfo->hash_sec->contents
+ + (bucket + 2) * hash_entry_size);
+ chain = bfd_get (8 * hash_entry_size, finfo->output_bfd, bucketpos);
+ bfd_put (8 * hash_entry_size, finfo->output_bfd, h->dynindx, bucketpos);
+ bfd_put (8 * hash_entry_size, finfo->output_bfd, chain,
+ ((bfd_byte *) finfo->hash_sec->contents
+ + (bucketcount + 2 + h->dynindx) * hash_entry_size));
+ }
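The writes above prepend the symbol to its bucket: the previous bucket head is moved into chain[dynindx] and the bucket entry now points at this symbol. For reference, the matching SysV lookup walks the structure as in this sketch (not part of the patch; dynsym_name is a stand-in, and h is the precomputed SysV hash of the name):

    #include <stdint.h>
    #include <string.h>

    extern const char *dynsym_name (uint32_t i);   /* illustrative stand-in */

    /* hash points at the .hash contents read as 32-bit words.  */
    static uint32_t
    sysv_hash_lookup (const char *name, unsigned long h, const uint32_t *hash)
    {
      uint32_t nbucket = hash[0];
      const uint32_t *bucket = hash + 2;
      const uint32_t *chain = bucket + nbucket;

      for (uint32_t i = bucket[h % nbucket]; i != 0; i = chain[i])
        if (strcmp (dynsym_name (i), name) == 0)
          return i;
      return 0;                /* STN_UNDEF: not found */
    }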
if (finfo->symver_sec != NULL && finfo->symver_sec->contents != NULL)
{
@@ -7861,7 +8192,7 @@ bfd_elf_final_link (bfd *abfd, struct bfd_link_info *info)
{
finfo.dynsym_sec = bfd_get_section_by_name (dynobj, ".dynsym");
finfo.hash_sec = bfd_get_section_by_name (dynobj, ".hash");
- BFD_ASSERT (finfo.dynsym_sec != NULL && finfo.hash_sec != NULL);
+ BFD_ASSERT (finfo.dynsym_sec != NULL);
finfo.symver_sec = bfd_get_section_by_name (dynobj, ".gnu.version");
/* Note that it is OK if symver_sec is NULL. */
}
@@ -8621,6 +8952,9 @@ bfd_elf_final_link (bfd *abfd, struct bfd_link_info *info)
case DT_HASH:
name = ".hash";
goto get_vma;
+ case DT_GNU_HASH:
+ name = ".gnu.hash";
+ goto get_vma;
case DT_STRTAB:
name = ".dynstr";
goto get_vma;
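With this last hunk the linker records the runtime address of .gnu.hash under the new tag, alongside DT_HASH when both tables are emitted. A consumer picks whichever is present while scanning the dynamic array; a rough sketch, assuming a DT_GNU_HASH definition is available in <elf.h> (older headers may lack it):

    #include <elf.h>

    /* Scan the _DYNAMIC array for the hash table addresses.  */
    static void
    find_hash_tables (const Elf64_Dyn *dyn, Elf64_Addr *sysv, Elf64_Addr *gnu)
    {
      for (; dyn->d_tag != DT_NULL; dyn++)
        {
          if (dyn->d_tag == DT_HASH)
            *sysv = dyn->d_un.d_ptr;
          else if (dyn->d_tag == DT_GNU_HASH)
            *gnu = dyn->d_un.d_ptr;
        }
    }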