author     Richard Henderson <rth@redhat.com>  2005-05-26 07:14:03 +0000
committer  Richard Henderson <rth@redhat.com>  2005-05-26 07:14:03 +0000
commit     a7519a3c057af6f71dcf4720270b3c4cf6e740a1 (patch)
tree       c18bd90da1b22e34e813f986e97aae39845e1c8a
parent     913ccbda02a8ceb5703421a9f38b32efab8d425d (diff)
* elf64-alpha.c: Update all function definitions to ISO C. Remove
all function prototypes; rearrange functions into def-use order.
-rw-r--r--  bfd/ChangeLog      |    5
-rw-r--r--  bfd/elf64-alpha.c  | 2867
2 files changed, 1293 insertions, 1579 deletions
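
For reference, the change described above replaces old-style (K&R) definitions and their separate PARAMS prototypes with ISO C definitions, then reorders the functions so each definition precedes its first use and the forward prototypes can be dropped. A minimal sketch of the pattern, using a hypothetical function name rather than code taken verbatim from the patch:

    /* Before: a separate PARAMS prototype plus a K&R-style definition.  */
    static bfd_boolean example_func
      PARAMS ((bfd *, struct bfd_link_info *));

    static bfd_boolean
    example_func (abfd, info)
         bfd *abfd;
         struct bfd_link_info *info;
    {
      return TRUE;
    }

    /* After: the prototype is removed and the parameter types move
       into the definition itself; the function is placed before its
       first caller so no forward declaration is needed.  */
    static bfd_boolean
    example_func (bfd *abfd, struct bfd_link_info *info)
    {
      return TRUE;
    }

The diff below applies exactly this transformation across elf64-alpha.c, which is why large blocks of prototypes are deleted near the top of the file and several functions appear to move.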
diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index 5c3466f..40a5e44 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,5 +1,10 @@
2005-05-25 Richard Henderson <rth@redhat.com>
+ * elf64-alpha.c: Update all function definitions to ISO C. Remove
+ all function prototypes; rearrange functions into def-use order.
+
+2005-05-25 Richard Henderson <rth@redhat.com>
+
* elf64-alpha.c (elf64_alpha_merge_gots): Fix gotent iteration
in the presence of deleting elements.
(elf64_alpha_size_got_sections): Zero dead got section size.
diff --git a/bfd/elf64-alpha.c b/bfd/elf64-alpha.c
index e13f260..fd0d052 100644
--- a/bfd/elf64-alpha.c
+++ b/bfd/elf64-alpha.c
@@ -47,114 +47,6 @@
#define ECOFF_64
#include "ecoffswap.h"
-static bfd_boolean alpha_elf_dynamic_symbol_p
- PARAMS ((struct elf_link_hash_entry *, struct bfd_link_info *));
-static struct bfd_hash_entry * elf64_alpha_link_hash_newfunc
- PARAMS ((struct bfd_hash_entry *, struct bfd_hash_table *, const char *));
-static struct bfd_link_hash_table * elf64_alpha_bfd_link_hash_table_create
- PARAMS ((bfd *));
-
-static bfd_reloc_status_type elf64_alpha_reloc_nil
- PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
-static bfd_reloc_status_type elf64_alpha_reloc_bad
- PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
-static bfd_reloc_status_type elf64_alpha_do_reloc_gpdisp
- PARAMS ((bfd *, bfd_vma, bfd_byte *, bfd_byte *));
-static bfd_reloc_status_type elf64_alpha_reloc_gpdisp
- PARAMS ((bfd *, arelent *, asymbol *, PTR, asection *, bfd *, char **));
-
-static reloc_howto_type * elf64_alpha_bfd_reloc_type_lookup
- PARAMS ((bfd *, bfd_reloc_code_real_type));
-static void elf64_alpha_info_to_howto
- PARAMS ((bfd *, arelent *, Elf_Internal_Rela *));
-
-static bfd_boolean elf64_alpha_mkobject
- PARAMS ((bfd *));
-static bfd_boolean elf64_alpha_object_p
- PARAMS ((bfd *));
-static bfd_boolean elf64_alpha_section_flags
- PARAMS ((flagword *, const Elf_Internal_Shdr *));
-static bfd_boolean elf64_alpha_fake_sections
- PARAMS ((bfd *, Elf_Internal_Shdr *, asection *));
-static bfd_boolean elf64_alpha_create_got_section
- PARAMS ((bfd *, struct bfd_link_info *));
-static bfd_boolean elf64_alpha_create_dynamic_sections
- PARAMS ((bfd *, struct bfd_link_info *));
-
-static bfd_boolean elf64_alpha_read_ecoff_info
- PARAMS ((bfd *, asection *, struct ecoff_debug_info *));
-static bfd_boolean elf64_alpha_is_local_label_name
- PARAMS ((bfd *, const char *));
-static bfd_boolean elf64_alpha_find_nearest_line
- PARAMS ((bfd *, asection *, asymbol **, bfd_vma, const char **,
- const char **, unsigned int *));
-
-#if defined(__STDC__) || defined(ALMOST_STDC)
-struct alpha_elf_link_hash_entry;
-#endif
-
-static bfd_boolean elf64_alpha_output_extsym
- PARAMS ((struct alpha_elf_link_hash_entry *, PTR));
-
-static bfd_boolean elf64_alpha_can_merge_gots
- PARAMS ((bfd *, bfd *));
-static void elf64_alpha_merge_gots
- PARAMS ((bfd *, bfd *));
-static bfd_boolean elf64_alpha_calc_got_offsets_for_symbol
- PARAMS ((struct alpha_elf_link_hash_entry *, PTR));
-static void elf64_alpha_calc_got_offsets
- PARAMS ((struct bfd_link_info *));
-static bfd_boolean elf64_alpha_size_got_sections
- PARAMS ((struct bfd_link_info *));
-static bfd_boolean elf64_alpha_size_plt_section
- PARAMS ((struct bfd_link_info *));
-static bfd_boolean elf64_alpha_size_plt_section_1
- PARAMS ((struct alpha_elf_link_hash_entry *, PTR));
-static bfd_boolean elf64_alpha_always_size_sections
- PARAMS ((bfd *, struct bfd_link_info *));
-static int alpha_dynamic_entries_for_reloc
- PARAMS ((int, int, int));
-static bfd_boolean elf64_alpha_calc_dynrel_sizes
- PARAMS ((struct alpha_elf_link_hash_entry *, struct bfd_link_info *));
-static bfd_boolean elf64_alpha_size_rela_got_section
- PARAMS ((struct bfd_link_info *));
-static bfd_boolean elf64_alpha_size_rela_got_1
- PARAMS ((struct alpha_elf_link_hash_entry *, struct bfd_link_info *));
-static bfd_boolean elf64_alpha_add_symbol_hook
- PARAMS ((bfd *, struct bfd_link_info *, Elf_Internal_Sym *,
- const char **, flagword *, asection **, bfd_vma *));
-static struct alpha_elf_got_entry *get_got_entry
- PARAMS ((bfd *, struct alpha_elf_link_hash_entry *, unsigned long,
- unsigned long, bfd_vma));
-static bfd_boolean elf64_alpha_check_relocs
- PARAMS ((bfd *, struct bfd_link_info *, asection *sec,
- const Elf_Internal_Rela *));
-static bfd_boolean elf64_alpha_adjust_dynamic_symbol
- PARAMS ((struct bfd_link_info *, struct elf_link_hash_entry *));
-static bfd_boolean elf64_alpha_size_dynamic_sections
- PARAMS ((bfd *, struct bfd_link_info *));
-static void elf64_alpha_emit_dynrel
- PARAMS ((bfd *, struct bfd_link_info *, asection *, asection *,
- bfd_vma, long, long, bfd_vma));
-static bfd_boolean elf64_alpha_relocate_section_r
- PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
- Elf_Internal_Rela *, Elf_Internal_Sym *, asection **));
-static bfd_boolean elf64_alpha_relocate_section
- PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *, bfd_byte *,
- Elf_Internal_Rela *, Elf_Internal_Sym *, asection **));
-static bfd_boolean elf64_alpha_finish_dynamic_symbol
- PARAMS ((bfd *, struct bfd_link_info *, struct elf_link_hash_entry *,
- Elf_Internal_Sym *));
-static bfd_boolean elf64_alpha_finish_dynamic_sections
- PARAMS ((bfd *, struct bfd_link_info *));
-static bfd_boolean elf64_alpha_final_link
- PARAMS ((bfd *, struct bfd_link_info *));
-static bfd_boolean elf64_alpha_merge_ind_symbols
- PARAMS ((struct alpha_elf_link_hash_entry *, PTR));
-static Elf_Internal_Rela * elf64_alpha_find_reloc_at_ofs
- PARAMS ((Elf_Internal_Rela *, Elf_Internal_Rela *, bfd_vma, int));
-static enum elf_reloc_type_class elf64_alpha_reloc_type_class
- PARAMS ((const Elf_Internal_Rela *));
struct alpha_elf_link_hash_entry
{
@@ -254,7 +146,7 @@ struct alpha_elf_link_hash_table
#define alpha_elf_link_hash_traverse(table, func, info) \
(elf_link_hash_traverse \
(&(table)->root, \
- (bfd_boolean (*) PARAMS ((struct elf_link_hash_entry *, PTR))) (func), \
+ (bfd_boolean (*) (struct elf_link_hash_entry *, PTR)) (func), \
(info)))
/* Get the Alpha ELF linker hash table from a link_info structure. */
@@ -273,9 +165,8 @@ struct alpha_elf_link_hash_table
address is ever taken. */
static inline bfd_boolean
-alpha_elf_dynamic_symbol_p (h, info)
- struct elf_link_hash_entry *h;
- struct bfd_link_info *info;
+alpha_elf_dynamic_symbol_p (struct elf_link_hash_entry *h,
+ struct bfd_link_info *info)
{
return _bfd_elf_dynamic_symbol_p (h, info, 0);
}
@@ -283,10 +174,9 @@ alpha_elf_dynamic_symbol_p (h, info)
/* Create an entry in a Alpha ELF linker hash table. */
static struct bfd_hash_entry *
-elf64_alpha_link_hash_newfunc (entry, table, string)
- struct bfd_hash_entry *entry;
- struct bfd_hash_table *table;
- const char *string;
+elf64_alpha_link_hash_newfunc (struct bfd_hash_entry *entry,
+ struct bfd_hash_table *table,
+ const char *string)
{
struct alpha_elf_link_hash_entry *ret =
(struct alpha_elf_link_hash_entry *) entry;
@@ -322,8 +212,7 @@ elf64_alpha_link_hash_newfunc (entry, table, string)
/* Create a Alpha ELF linker hash table. */
static struct bfd_link_hash_table *
-elf64_alpha_bfd_link_hash_table_create (abfd)
- bfd *abfd;
+elf64_alpha_bfd_link_hash_table_create (bfd *abfd)
{
struct alpha_elf_link_hash_table *ret;
bfd_size_type amt = sizeof (struct alpha_elf_link_hash_table);
@@ -377,8 +266,7 @@ struct alpha_elf_obj_tdata
((struct alpha_elf_obj_tdata *) (abfd)->tdata.any)
static bfd_boolean
-elf64_alpha_mkobject (abfd)
- bfd *abfd;
+elf64_alpha_mkobject (bfd *abfd)
{
bfd_size_type amt = sizeof (struct alpha_elf_obj_tdata);
abfd->tdata.any = bfd_zalloc (abfd, amt);
@@ -388,13 +276,123 @@ elf64_alpha_mkobject (abfd)
}
static bfd_boolean
-elf64_alpha_object_p (abfd)
- bfd *abfd;
+elf64_alpha_object_p (bfd *abfd)
{
/* Set the right machine number for an Alpha ELF file. */
return bfd_default_set_arch_mach (abfd, bfd_arch_alpha, 0);
}
+/* A relocation function which doesn't do anything. */
+
+static bfd_reloc_status_type
+elf64_alpha_reloc_nil (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc,
+ asymbol *sym ATTRIBUTE_UNUSED,
+ PTR data ATTRIBUTE_UNUSED, asection *sec,
+ bfd *output_bfd, char **error_message ATTRIBUTE_UNUSED)
+{
+ if (output_bfd)
+ reloc->address += sec->output_offset;
+ return bfd_reloc_ok;
+}
+
+/* A relocation function used for an unsupported reloc. */
+
+static bfd_reloc_status_type
+elf64_alpha_reloc_bad (bfd *abfd ATTRIBUTE_UNUSED, arelent *reloc,
+ asymbol *sym ATTRIBUTE_UNUSED,
+ PTR data ATTRIBUTE_UNUSED, asection *sec,
+ bfd *output_bfd, char **error_message ATTRIBUTE_UNUSED)
+{
+ if (output_bfd)
+ reloc->address += sec->output_offset;
+ return bfd_reloc_notsupported;
+}
+
+/* Do the work of the GPDISP relocation. */
+
+static bfd_reloc_status_type
+elf64_alpha_do_reloc_gpdisp (bfd *abfd, bfd_vma gpdisp, bfd_byte *p_ldah,
+ bfd_byte *p_lda)
+{
+ bfd_reloc_status_type ret = bfd_reloc_ok;
+ bfd_vma addend;
+ unsigned long i_ldah, i_lda;
+
+ i_ldah = bfd_get_32 (abfd, p_ldah);
+ i_lda = bfd_get_32 (abfd, p_lda);
+
+ /* Complain if the instructions are not correct. */
+ if (((i_ldah >> 26) & 0x3f) != 0x09
+ || ((i_lda >> 26) & 0x3f) != 0x08)
+ ret = bfd_reloc_dangerous;
+
+ /* Extract the user-supplied offset, mirroring the sign extensions
+ that the instructions perform. */
+ addend = ((i_ldah & 0xffff) << 16) | (i_lda & 0xffff);
+ addend = (addend ^ 0x80008000) - 0x80008000;
+
+ gpdisp += addend;
+
+ if ((bfd_signed_vma) gpdisp < -(bfd_signed_vma) 0x80000000
+ || (bfd_signed_vma) gpdisp >= (bfd_signed_vma) 0x7fff8000)
+ ret = bfd_reloc_overflow;
+
+ /* compensate for the sign extension again. */
+ i_ldah = ((i_ldah & 0xffff0000)
+ | (((gpdisp >> 16) + ((gpdisp >> 15) & 1)) & 0xffff));
+ i_lda = (i_lda & 0xffff0000) | (gpdisp & 0xffff);
+
+ bfd_put_32 (abfd, (bfd_vma) i_ldah, p_ldah);
+ bfd_put_32 (abfd, (bfd_vma) i_lda, p_lda);
+
+ return ret;
+}
+
+/* The special function for the GPDISP reloc. */
+
+static bfd_reloc_status_type
+elf64_alpha_reloc_gpdisp (bfd *abfd, arelent *reloc_entry,
+ asymbol *sym ATTRIBUTE_UNUSED, PTR data,
+ asection *input_section, bfd *output_bfd,
+ char **err_msg)
+{
+ bfd_reloc_status_type ret;
+ bfd_vma gp, relocation;
+ bfd_vma high_address;
+ bfd_byte *p_ldah, *p_lda;
+
+ /* Don't do anything if we're not doing a final link. */
+ if (output_bfd)
+ {
+ reloc_entry->address += input_section->output_offset;
+ return bfd_reloc_ok;
+ }
+
+ high_address = bfd_get_section_limit (abfd, input_section);
+ if (reloc_entry->address > high_address
+ || reloc_entry->address + reloc_entry->addend > high_address)
+ return bfd_reloc_outofrange;
+
+ /* The gp used in the portion of the output object to which this
+ input object belongs is cached on the input bfd. */
+ gp = _bfd_get_gp_value (abfd);
+
+ relocation = (input_section->output_section->vma
+ + input_section->output_offset
+ + reloc_entry->address);
+
+ p_ldah = (bfd_byte *) data + reloc_entry->address;
+ p_lda = p_ldah + reloc_entry->addend;
+
+ ret = elf64_alpha_do_reloc_gpdisp (abfd, gp - relocation, p_ldah, p_lda);
+
+ /* Complain if the instructions are not correct. */
+ if (ret == bfd_reloc_dangerous)
+ *err_msg = _("GPDISP relocation did not find ldah and lda instructions");
+
+ return ret;
+}
+
/* In case we're on a 32-bit machine, construct a 64-bit "-1" value
from smaller values. Start with zero, widen, *then* decrement. */
#define MINUS_ONE (((bfd_vma)0) - 1)
@@ -942,133 +940,6 @@ static reloc_howto_type elf64_alpha_howto_table[] =
FALSE), /* pcrel_offset */
};
-/* A relocation function which doesn't do anything. */
-
-static bfd_reloc_status_type
-elf64_alpha_reloc_nil (abfd, reloc, sym, data, sec, output_bfd, error_message)
- bfd *abfd ATTRIBUTE_UNUSED;
- arelent *reloc;
- asymbol *sym ATTRIBUTE_UNUSED;
- PTR data ATTRIBUTE_UNUSED;
- asection *sec;
- bfd *output_bfd;
- char **error_message ATTRIBUTE_UNUSED;
-{
- if (output_bfd)
- reloc->address += sec->output_offset;
- return bfd_reloc_ok;
-}
-
-/* A relocation function used for an unsupported reloc. */
-
-static bfd_reloc_status_type
-elf64_alpha_reloc_bad (abfd, reloc, sym, data, sec, output_bfd, error_message)
- bfd *abfd ATTRIBUTE_UNUSED;
- arelent *reloc;
- asymbol *sym ATTRIBUTE_UNUSED;
- PTR data ATTRIBUTE_UNUSED;
- asection *sec;
- bfd *output_bfd;
- char **error_message ATTRIBUTE_UNUSED;
-{
- if (output_bfd)
- reloc->address += sec->output_offset;
- return bfd_reloc_notsupported;
-}
-
-/* Do the work of the GPDISP relocation. */
-
-static bfd_reloc_status_type
-elf64_alpha_do_reloc_gpdisp (abfd, gpdisp, p_ldah, p_lda)
- bfd *abfd;
- bfd_vma gpdisp;
- bfd_byte *p_ldah;
- bfd_byte *p_lda;
-{
- bfd_reloc_status_type ret = bfd_reloc_ok;
- bfd_vma addend;
- unsigned long i_ldah, i_lda;
-
- i_ldah = bfd_get_32 (abfd, p_ldah);
- i_lda = bfd_get_32 (abfd, p_lda);
-
- /* Complain if the instructions are not correct. */
- if (((i_ldah >> 26) & 0x3f) != 0x09
- || ((i_lda >> 26) & 0x3f) != 0x08)
- ret = bfd_reloc_dangerous;
-
- /* Extract the user-supplied offset, mirroring the sign extensions
- that the instructions perform. */
- addend = ((i_ldah & 0xffff) << 16) | (i_lda & 0xffff);
- addend = (addend ^ 0x80008000) - 0x80008000;
-
- gpdisp += addend;
-
- if ((bfd_signed_vma) gpdisp < -(bfd_signed_vma) 0x80000000
- || (bfd_signed_vma) gpdisp >= (bfd_signed_vma) 0x7fff8000)
- ret = bfd_reloc_overflow;
-
- /* compensate for the sign extension again. */
- i_ldah = ((i_ldah & 0xffff0000)
- | (((gpdisp >> 16) + ((gpdisp >> 15) & 1)) & 0xffff));
- i_lda = (i_lda & 0xffff0000) | (gpdisp & 0xffff);
-
- bfd_put_32 (abfd, (bfd_vma) i_ldah, p_ldah);
- bfd_put_32 (abfd, (bfd_vma) i_lda, p_lda);
-
- return ret;
-}
-
-/* The special function for the GPDISP reloc. */
-
-static bfd_reloc_status_type
-elf64_alpha_reloc_gpdisp (abfd, reloc_entry, sym, data, input_section,
- output_bfd, err_msg)
- bfd *abfd;
- arelent *reloc_entry;
- asymbol *sym ATTRIBUTE_UNUSED;
- PTR data;
- asection *input_section;
- bfd *output_bfd;
- char **err_msg;
-{
- bfd_reloc_status_type ret;
- bfd_vma gp, relocation;
- bfd_vma high_address;
- bfd_byte *p_ldah, *p_lda;
-
- /* Don't do anything if we're not doing a final link. */
- if (output_bfd)
- {
- reloc_entry->address += input_section->output_offset;
- return bfd_reloc_ok;
- }
-
- high_address = bfd_get_section_limit (abfd, input_section);
- if (reloc_entry->address > high_address
- || reloc_entry->address + reloc_entry->addend > high_address)
- return bfd_reloc_outofrange;
-
- /* The gp used in the portion of the output object to which this
- input object belongs is cached on the input bfd. */
- gp = _bfd_get_gp_value (abfd);
-
- relocation = (input_section->output_section->vma
- + input_section->output_offset
- + reloc_entry->address);
-
- p_ldah = (bfd_byte *) data + reloc_entry->address;
- p_lda = p_ldah + reloc_entry->addend;
-
- ret = elf64_alpha_do_reloc_gpdisp (abfd, gp - relocation, p_ldah, p_lda);
-
- /* Complain if the instructions are not correct. */
- if (ret == bfd_reloc_dangerous)
- *err_msg = _("GPDISP relocation did not find ldah and lda instructions");
-
- return ret;
-}
-
/* A mapping from BFD reloc types to Alpha ELF reloc types. */
struct elf_reloc_map
@@ -1114,9 +985,8 @@ static const struct elf_reloc_map elf64_alpha_reloc_map[] =
/* Given a BFD reloc type, return a HOWTO structure. */
static reloc_howto_type *
-elf64_alpha_bfd_reloc_type_lookup (abfd, code)
- bfd *abfd ATTRIBUTE_UNUSED;
- bfd_reloc_code_real_type code;
+elf64_alpha_bfd_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
+ bfd_reloc_code_real_type code)
{
const struct elf_reloc_map *i, *e;
i = e = elf64_alpha_reloc_map;
@@ -1132,14 +1002,10 @@ elf64_alpha_bfd_reloc_type_lookup (abfd, code)
/* Given an Alpha ELF reloc type, fill in an arelent structure. */
static void
-elf64_alpha_info_to_howto (abfd, cache_ptr, dst)
- bfd *abfd ATTRIBUTE_UNUSED;
- arelent *cache_ptr;
- Elf_Internal_Rela *dst;
+elf64_alpha_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
+ Elf_Internal_Rela *dst)
{
- unsigned r_type;
-
- r_type = ELF64_R_TYPE(dst->r_info);
+ unsigned r_type = ELF64_R_TYPE(dst->r_info);
BFD_ASSERT (r_type < (unsigned int) R_ALPHA_max);
cache_ptr->howto = &elf64_alpha_howto_table[r_type];
}
@@ -1159,1117 +1025,6 @@ elf64_alpha_info_to_howto (abfd, cache_ptr, dst)
- align_power ((bfd_vma) 16, \
elf_hash_table (info)->tls_sec->alignment_power))
-/* These functions do relaxation for Alpha ELF.
-
- Currently I'm only handling what I can do with existing compiler
- and assembler support, which means no instructions are removed,
- though some may be nopped. At this time GCC does not emit enough
- information to do all of the relaxing that is possible. It will
- take some not small amount of work for that to happen.
-
- There are a couple of interesting papers that I once read on this
- subject, that I cannot find references to at the moment, that
- related to Alpha in particular. They are by David Wall, then of
- DEC WRL. */
-
-#define OP_LDA 0x08
-#define OP_LDAH 0x09
-#define INSN_JSR 0x68004000
-#define INSN_JSR_MASK 0xfc00c000
-#define OP_LDQ 0x29
-#define OP_BR 0x30
-#define OP_BSR 0x34
-#define INSN_UNOP 0x2ffe0000
-#define INSN_ADDQ 0x40000400
-#define INSN_RDUNIQ 0x0000009e
-
-struct alpha_relax_info
-{
- bfd *abfd;
- asection *sec;
- bfd_byte *contents;
- Elf_Internal_Shdr *symtab_hdr;
- Elf_Internal_Rela *relocs, *relend;
- struct bfd_link_info *link_info;
- bfd_vma gp;
- bfd *gotobj;
- asection *tsec;
- struct alpha_elf_link_hash_entry *h;
- struct alpha_elf_got_entry **first_gotent;
- struct alpha_elf_got_entry *gotent;
- bfd_boolean changed_contents;
- bfd_boolean changed_relocs;
- unsigned char other;
-};
-
-static bfd_boolean elf64_alpha_relax_with_lituse
- PARAMS((struct alpha_relax_info *info, bfd_vma symval,
- Elf_Internal_Rela *irel));
-static bfd_vma elf64_alpha_relax_opt_call
- PARAMS((struct alpha_relax_info *info, bfd_vma symval));
-static bfd_boolean elf64_alpha_relax_got_load
- PARAMS((struct alpha_relax_info *info, bfd_vma symval,
- Elf_Internal_Rela *irel, unsigned long));
-static bfd_boolean elf64_alpha_relax_gprelhilo
- PARAMS((struct alpha_relax_info *info, bfd_vma symval,
- Elf_Internal_Rela *irel, bfd_boolean));
-static bfd_boolean elf64_alpha_relax_tls_get_addr
- PARAMS((struct alpha_relax_info *info, bfd_vma symval,
- Elf_Internal_Rela *irel, bfd_boolean));
-static bfd_boolean elf64_alpha_relax_section
- PARAMS((bfd *abfd, asection *sec, struct bfd_link_info *link_info,
- bfd_boolean *again));
-
-static Elf_Internal_Rela *
-elf64_alpha_find_reloc_at_ofs (rel, relend, offset, type)
- Elf_Internal_Rela *rel, *relend;
- bfd_vma offset;
- int type;
-{
- while (rel < relend)
- {
- if (rel->r_offset == offset
- && ELF64_R_TYPE (rel->r_info) == (unsigned int) type)
- return rel;
- ++rel;
- }
- return NULL;
-}
-
-static bfd_boolean
-elf64_alpha_relax_with_lituse (info, symval, irel)
- struct alpha_relax_info *info;
- bfd_vma symval;
- Elf_Internal_Rela *irel;
-{
- Elf_Internal_Rela *urel, *irelend = info->relend;
- int flags, count, i;
- bfd_signed_vma disp;
- bfd_boolean fits16;
- bfd_boolean fits32;
- bfd_boolean lit_reused = FALSE;
- bfd_boolean all_optimized = TRUE;
- unsigned int lit_insn;
-
- lit_insn = bfd_get_32 (info->abfd, info->contents + irel->r_offset);
- if (lit_insn >> 26 != OP_LDQ)
- {
- ((*_bfd_error_handler)
- ("%B: %A+0x%lx: warning: LITERAL relocation against unexpected insn",
- info->abfd, info->sec,
- (unsigned long) irel->r_offset));
- return TRUE;
- }
-
- /* Can't relax dynamic symbols. */
- if (alpha_elf_dynamic_symbol_p (&info->h->root, info->link_info))
- return TRUE;
-
- /* Summarize how this particular LITERAL is used. */
- for (urel = irel+1, flags = count = 0; urel < irelend; ++urel, ++count)
- {
- if (ELF64_R_TYPE (urel->r_info) != R_ALPHA_LITUSE)
- break;
- if (urel->r_addend <= 3)
- flags |= 1 << urel->r_addend;
- }
-
- /* A little preparation for the loop... */
- disp = symval - info->gp;
-
- for (urel = irel+1, i = 0; i < count; ++i, ++urel)
- {
- unsigned int insn;
- int insn_disp;
- bfd_signed_vma xdisp;
-
- insn = bfd_get_32 (info->abfd, info->contents + urel->r_offset);
-
- switch (urel->r_addend)
- {
- case LITUSE_ALPHA_ADDR:
- default:
- /* This type is really just a placeholder to note that all
- uses cannot be optimized, but to still allow some. */
- all_optimized = FALSE;
- break;
-
- case LITUSE_ALPHA_BASE:
- /* We can always optimize 16-bit displacements. */
-
- /* Extract the displacement from the instruction, sign-extending
- it if necessary, then test whether it is within 16 or 32 bits
- displacement from GP. */
- insn_disp = ((insn & 0xffff) ^ 0x8000) - 0x8000;
-
- xdisp = disp + insn_disp;
- fits16 = (xdisp >= - (bfd_signed_vma) 0x8000 && xdisp < 0x8000);
- fits32 = (xdisp >= - (bfd_signed_vma) 0x80000000
- && xdisp < 0x7fff8000);
-
- if (fits16)
- {
- /* Take the op code and dest from this insn, take the base
- register from the literal insn. Leave the offset alone. */
- insn = (insn & 0xffe0ffff) | (lit_insn & 0x001f0000);
- urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
- R_ALPHA_GPREL16);
- urel->r_addend = irel->r_addend;
- info->changed_relocs = TRUE;
-
- bfd_put_32 (info->abfd, (bfd_vma) insn,
- info->contents + urel->r_offset);
- info->changed_contents = TRUE;
- }
-
- /* If all mem+byte, we can optimize 32-bit mem displacements. */
- else if (fits32 && !(flags & ~6))
- {
- /* FIXME: sanity check that lit insn Ra is mem insn Rb. */
-
- irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
- R_ALPHA_GPRELHIGH);
- lit_insn = (OP_LDAH << 26) | (lit_insn & 0x03ff0000);
- bfd_put_32 (info->abfd, (bfd_vma) lit_insn,
- info->contents + irel->r_offset);
- lit_reused = TRUE;
- info->changed_contents = TRUE;
-
- urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
- R_ALPHA_GPRELLOW);
- urel->r_addend = irel->r_addend;
- info->changed_relocs = TRUE;
- }
- else
- all_optimized = FALSE;
- break;
-
- case LITUSE_ALPHA_BYTOFF:
- /* We can always optimize byte instructions. */
-
- /* FIXME: sanity check the insn for byte op. Check that the
- literal dest reg is indeed Rb in the byte insn. */
-
- insn &= ~ (unsigned) 0x001ff000;
- insn |= ((symval & 7) << 13) | 0x1000;
-
- urel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
- urel->r_addend = 0;
- info->changed_relocs = TRUE;
-
- bfd_put_32 (info->abfd, (bfd_vma) insn,
- info->contents + urel->r_offset);
- info->changed_contents = TRUE;
- break;
-
- case LITUSE_ALPHA_JSR:
- case LITUSE_ALPHA_TLSGD:
- case LITUSE_ALPHA_TLSLDM:
- {
- bfd_vma optdest, org;
- bfd_signed_vma odisp;
-
- /* For undefined weak symbols, we're mostly interested in getting
- rid of the got entry whenever possible, so optimize this to a
- use of the zero register. */
- if (info->h && info->h->root.root.type == bfd_link_hash_undefweak)
- {
- insn |= 31 << 16;
- bfd_put_32 (info->abfd, (bfd_vma) insn,
- info->contents + urel->r_offset);
-
- info->changed_contents = TRUE;
- break;
- }
-
- /* If not zero, place to jump without needing pv. */
- optdest = elf64_alpha_relax_opt_call (info, symval);
- org = (info->sec->output_section->vma
- + info->sec->output_offset
- + urel->r_offset + 4);
- odisp = (optdest ? optdest : symval) - org;
-
- if (odisp >= -0x400000 && odisp < 0x400000)
- {
- Elf_Internal_Rela *xrel;
-
- /* Preserve branch prediction call stack when possible. */
- if ((insn & INSN_JSR_MASK) == INSN_JSR)
- insn = (OP_BSR << 26) | (insn & 0x03e00000);
- else
- insn = (OP_BR << 26) | (insn & 0x03e00000);
-
- urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
- R_ALPHA_BRADDR);
- urel->r_addend = irel->r_addend;
-
- if (optdest)
- urel->r_addend += optdest - symval;
- else
- all_optimized = FALSE;
-
- bfd_put_32 (info->abfd, (bfd_vma) insn,
- info->contents + urel->r_offset);
-
- /* Kill any HINT reloc that might exist for this insn. */
- xrel = (elf64_alpha_find_reloc_at_ofs
- (info->relocs, info->relend, urel->r_offset,
- R_ALPHA_HINT));
- if (xrel)
- xrel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
-
- info->changed_contents = TRUE;
- info->changed_relocs = TRUE;
- }
- else
- all_optimized = FALSE;
-
- /* Even if the target is not in range for a direct branch,
- if we share a GP, we can eliminate the gp reload. */
- if (optdest)
- {
- Elf_Internal_Rela *gpdisp
- = (elf64_alpha_find_reloc_at_ofs
- (info->relocs, irelend, urel->r_offset + 4,
- R_ALPHA_GPDISP));
- if (gpdisp)
- {
- bfd_byte *p_ldah = info->contents + gpdisp->r_offset;
- bfd_byte *p_lda = p_ldah + gpdisp->r_addend;
- unsigned int ldah = bfd_get_32 (info->abfd, p_ldah);
- unsigned int lda = bfd_get_32 (info->abfd, p_lda);
-
- /* Verify that the instruction is "ldah $29,0($26)".
- Consider a function that ends in a noreturn call,
- and that the next function begins with an ldgp,
- and that by accident there is no padding between.
- In that case the insn would use $27 as the base. */
- if (ldah == 0x27ba0000 && lda == 0x23bd0000)
- {
- bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, p_ldah);
- bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, p_lda);
-
- gpdisp->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
- info->changed_contents = TRUE;
- info->changed_relocs = TRUE;
- }
- }
- }
- }
- break;
- }
- }
-
- /* If all cases were optimized, we can reduce the use count on this
- got entry by one, possibly eliminating it. */
- if (all_optimized)
- {
- if (--info->gotent->use_count == 0)
- {
- int sz = alpha_got_entry_size (R_ALPHA_LITERAL);
- alpha_elf_tdata (info->gotobj)->total_got_size -= sz;
- if (!info->h)
- alpha_elf_tdata (info->gotobj)->local_got_size -= sz;
- }
-
- /* If the literal instruction is no longer needed (it may have been
- reused. We can eliminate it. */
- /* ??? For now, I don't want to deal with compacting the section,
- so just nop it out. */
- if (!lit_reused)
- {
- irel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
- info->changed_relocs = TRUE;
-
- bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP,
- info->contents + irel->r_offset);
- info->changed_contents = TRUE;
- }
-
- return TRUE;
- }
- else
- return elf64_alpha_relax_got_load (info, symval, irel, R_ALPHA_LITERAL);
-}
-
-static bfd_vma
-elf64_alpha_relax_opt_call (info, symval)
- struct alpha_relax_info *info;
- bfd_vma symval;
-{
- /* If the function has the same gp, and we can identify that the
- function does not use its function pointer, we can eliminate the
- address load. */
-
- /* If the symbol is marked NOPV, we are being told the function never
- needs its procedure value. */
- if ((info->other & STO_ALPHA_STD_GPLOAD) == STO_ALPHA_NOPV)
- return symval;
-
- /* If the symbol is marked STD_GP, we are being told the function does
- a normal ldgp in the first two words. */
- else if ((info->other & STO_ALPHA_STD_GPLOAD) == STO_ALPHA_STD_GPLOAD)
- ;
-
- /* Otherwise, we may be able to identify a GP load in the first two
- words, which we can then skip. */
- else
- {
- Elf_Internal_Rela *tsec_relocs, *tsec_relend, *tsec_free, *gpdisp;
- bfd_vma ofs;
-
- /* Load the relocations from the section that the target symbol is in. */
- if (info->sec == info->tsec)
- {
- tsec_relocs = info->relocs;
- tsec_relend = info->relend;
- tsec_free = NULL;
- }
- else
- {
- tsec_relocs = (_bfd_elf_link_read_relocs
- (info->abfd, info->tsec, (PTR) NULL,
- (Elf_Internal_Rela *) NULL,
- info->link_info->keep_memory));
- if (tsec_relocs == NULL)
- return 0;
- tsec_relend = tsec_relocs + info->tsec->reloc_count;
- tsec_free = (info->link_info->keep_memory ? NULL : tsec_relocs);
- }
-
- /* Recover the symbol's offset within the section. */
- ofs = (symval - info->tsec->output_section->vma
- - info->tsec->output_offset);
-
- /* Look for a GPDISP reloc. */
- gpdisp = (elf64_alpha_find_reloc_at_ofs
- (tsec_relocs, tsec_relend, ofs, R_ALPHA_GPDISP));
-
- if (!gpdisp || gpdisp->r_addend != 4)
- {
- if (tsec_free)
- free (tsec_free);
- return 0;
- }
- if (tsec_free)
- free (tsec_free);
- }
-
- /* We've now determined that we can skip an initial gp load. Verify
- that the call and the target use the same gp. */
- if (info->link_info->hash->creator != info->tsec->owner->xvec
- || info->gotobj != alpha_elf_tdata (info->tsec->owner)->gotobj)
- return 0;
-
- return symval + 8;
-}
-
-static bfd_boolean
-elf64_alpha_relax_got_load (info, symval, irel, r_type)
- struct alpha_relax_info *info;
- bfd_vma symval;
- Elf_Internal_Rela *irel;
- unsigned long r_type;
-{
- unsigned int insn;
- bfd_signed_vma disp;
-
- /* Get the instruction. */
- insn = bfd_get_32 (info->abfd, info->contents + irel->r_offset);
-
- if (insn >> 26 != OP_LDQ)
- {
- reloc_howto_type *howto = elf64_alpha_howto_table + r_type;
- ((*_bfd_error_handler)
- ("%B: %A+0x%lx: warning: %s relocation against unexpected insn",
- info->abfd, info->sec,
- (unsigned long) irel->r_offset, howto->name));
- return TRUE;
- }
-
- /* Can't relax dynamic symbols. */
- if (alpha_elf_dynamic_symbol_p (&info->h->root, info->link_info))
- return TRUE;
-
- /* Can't use local-exec relocations in shared libraries. */
- if (r_type == R_ALPHA_GOTTPREL && info->link_info->shared)
- return TRUE;
-
- if (r_type == R_ALPHA_LITERAL)
- {
- /* Look for nice constant addresses. This includes the not-uncommon
- special case of 0 for undefweak symbols. */
- if ((info->h && info->h->root.root.type == bfd_link_hash_undefweak)
- || (!info->link_info->shared
- && (symval >= (bfd_vma)-0x8000 || symval < 0x8000)))
- {
- disp = 0;
- insn = (OP_LDA << 26) | (insn & (31 << 21)) | (31 << 16);
- insn |= (symval & 0xffff);
- r_type = R_ALPHA_NONE;
- }
- else
- {
- disp = symval - info->gp;
- insn = (OP_LDA << 26) | (insn & 0x03ff0000);
- r_type = R_ALPHA_GPREL16;
- }
- }
- else
- {
- bfd_vma dtp_base, tp_base;
-
- BFD_ASSERT (elf_hash_table (info->link_info)->tls_sec != NULL);
- dtp_base = alpha_get_dtprel_base (info->link_info);
- tp_base = alpha_get_tprel_base (info->link_info);
- disp = symval - (r_type == R_ALPHA_GOTDTPREL ? dtp_base : tp_base);
-
- insn = (OP_LDA << 26) | (insn & (31 << 21)) | (31 << 16);
-
- switch (r_type)
- {
- case R_ALPHA_GOTDTPREL:
- r_type = R_ALPHA_DTPREL16;
- break;
- case R_ALPHA_GOTTPREL:
- r_type = R_ALPHA_TPREL16;
- break;
- default:
- BFD_ASSERT (0);
- return FALSE;
- }
- }
-
- if (disp < -0x8000 || disp >= 0x8000)
- return TRUE;
-
- bfd_put_32 (info->abfd, (bfd_vma) insn, info->contents + irel->r_offset);
- info->changed_contents = TRUE;
-
- /* Reduce the use count on this got entry by one, possibly
- eliminating it. */
- if (--info->gotent->use_count == 0)
- {
- int sz = alpha_got_entry_size (r_type);
- alpha_elf_tdata (info->gotobj)->total_got_size -= sz;
- if (!info->h)
- alpha_elf_tdata (info->gotobj)->local_got_size -= sz;
- }
-
- /* Smash the existing GOT relocation for its 16-bit immediate pair. */
- irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), r_type);
- info->changed_relocs = TRUE;
-
- /* ??? Search forward through this basic block looking for insns
- that use the target register. Stop after an insn modifying the
- register is seen, or after a branch or call.
-
- Any such memory load insn may be substituted by a load directly
- off the GP. This allows the memory load insn to be issued before
- the calculated GP register would otherwise be ready.
-
- Any such jsr insn can be replaced by a bsr if it is in range.
-
- This would mean that we'd have to _add_ relocations, the pain of
- which gives one pause. */
-
- return TRUE;
-}
-
-static bfd_boolean
-elf64_alpha_relax_gprelhilo (info, symval, irel, hi)
- struct alpha_relax_info *info;
- bfd_vma symval;
- Elf_Internal_Rela *irel;
- bfd_boolean hi;
-{
- unsigned int insn;
- bfd_signed_vma disp;
- bfd_byte *pos = info->contents + irel->r_offset;
-
- /* ??? This assumes that the compiler doesn't render
-
- array[i]
- as
- ldah t, array(gp) !gprelhigh
- s8addl i, t, t
- ldq r, array(t) !gprellow
-
- which would indeed be the most efficient way to implement this. */
-
- return TRUE;
-
- disp = symval - info->gp;
- if (disp < -0x8000 || disp >= 0x8000)
- return TRUE;
-
- if (hi)
- {
- /* Nop out the high instruction. */
-
- bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos);
- info->changed_contents = TRUE;
-
- irel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
- irel->r_addend = 0;
- info->changed_relocs = TRUE;
- }
- else
- {
- /* Adjust the low instruction to reference GP directly. */
-
- insn = bfd_get_32 (info->abfd, pos);
- insn = (insn & 0xffe00000) | (29 << 16);
- bfd_put_32 (info->abfd, (bfd_vma) insn, pos);
- info->changed_contents = TRUE;
-
- irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
- R_ALPHA_GPREL16);
- info->changed_relocs = TRUE;
- }
-
- return TRUE;
-}
-
-static bfd_boolean
-elf64_alpha_relax_tls_get_addr (info, symval, irel, is_gd)
- struct alpha_relax_info *info;
- bfd_vma symval;
- Elf_Internal_Rela *irel;
- bfd_boolean is_gd;
-{
- bfd_byte *pos[5];
- unsigned int insn;
- Elf_Internal_Rela *gpdisp, *hint;
- bfd_boolean dynamic, use_gottprel, pos1_unusable;
- unsigned long new_symndx;
-
- dynamic = alpha_elf_dynamic_symbol_p (&info->h->root, info->link_info);
-
- /* If a TLS symbol is accessed using IE at least once, there is no point
- to use dynamic model for it. */
- if (is_gd && info->h && (info->h->flags & ALPHA_ELF_LINK_HASH_TLS_IE))
- ;
-
- /* If the symbol is local, and we've already committed to DF_STATIC_TLS,
- then we might as well relax to IE. */
- else if (info->link_info->shared && !dynamic
- && (info->link_info->flags & DF_STATIC_TLS))
- ;
-
- /* Otherwise we must be building an executable to do anything. */
- else if (info->link_info->shared)
- return TRUE;
-
- /* The TLSGD/TLSLDM relocation must be followed by a LITERAL and
- the matching LITUSE_TLS relocations. */
- if (irel + 2 >= info->relend)
- return TRUE;
- if (ELF64_R_TYPE (irel[1].r_info) != R_ALPHA_LITERAL
- || ELF64_R_TYPE (irel[2].r_info) != R_ALPHA_LITUSE
- || irel[2].r_addend != (is_gd ? LITUSE_ALPHA_TLSGD : LITUSE_ALPHA_TLSLDM))
- return TRUE;
-
- /* There must be a GPDISP relocation positioned immediately after the
- LITUSE relocation. */
- gpdisp = elf64_alpha_find_reloc_at_ofs (info->relocs, info->relend,
- irel[2].r_offset + 4, R_ALPHA_GPDISP);
- if (!gpdisp)
- return TRUE;
-
- pos[0] = info->contents + irel[0].r_offset;
- pos[1] = info->contents + irel[1].r_offset;
- pos[2] = info->contents + irel[2].r_offset;
- pos[3] = info->contents + gpdisp->r_offset;
- pos[4] = pos[3] + gpdisp->r_addend;
- pos1_unusable = FALSE;
-
- /* Generally, the positions are not allowed to be out of order, lest the
- modified insn sequence have different register lifetimes. We can make
- an exception when pos 1 is adjacent to pos 0. */
- if (pos[1] + 4 == pos[0])
- {
- bfd_byte *tmp = pos[0];
- pos[0] = pos[1];
- pos[1] = tmp;
- }
- else if (pos[1] < pos[0])
- pos1_unusable = TRUE;
- if (pos[1] >= pos[2] || pos[2] >= pos[3])
- return TRUE;
-
- /* Reduce the use count on the LITERAL relocation. Do this before we
- smash the symndx when we adjust the relocations below. */
- {
- struct alpha_elf_got_entry *lit_gotent;
- struct alpha_elf_link_hash_entry *lit_h;
- unsigned long indx;
-
- BFD_ASSERT (ELF64_R_SYM (irel[1].r_info) >= info->symtab_hdr->sh_info);
- indx = ELF64_R_SYM (irel[1].r_info) - info->symtab_hdr->sh_info;
- lit_h = alpha_elf_sym_hashes (info->abfd)[indx];
-
- while (lit_h->root.root.type == bfd_link_hash_indirect
- || lit_h->root.root.type == bfd_link_hash_warning)
- lit_h = (struct alpha_elf_link_hash_entry *) lit_h->root.root.u.i.link;
-
- for (lit_gotent = lit_h->got_entries; lit_gotent ;
- lit_gotent = lit_gotent->next)
- if (lit_gotent->gotobj == info->gotobj
- && lit_gotent->reloc_type == R_ALPHA_LITERAL
- && lit_gotent->addend == irel[1].r_addend)
- break;
- BFD_ASSERT (lit_gotent);
-
- if (--lit_gotent->use_count == 0)
- {
- int sz = alpha_got_entry_size (R_ALPHA_LITERAL);
- alpha_elf_tdata (info->gotobj)->total_got_size -= sz;
- }
- }
-
- /* Change
-
- lda $16,x($gp) !tlsgd!1
- ldq $27,__tls_get_addr($gp) !literal!1
- jsr $26,($27)__tls_get_addr !lituse_tlsgd!1
- ldah $29,0($26) !gpdisp!2
- lda $29,0($29) !gpdisp!2
- to
- ldq $16,x($gp) !gottprel
- unop
- call_pal rduniq
- addq $16,$0,$0
- unop
- or the first pair to
- lda $16,x($gp) !tprel
- unop
- or
- ldah $16,x($gp) !tprelhi
- lda $16,x($16) !tprello
-
- as appropriate. */
-
- use_gottprel = FALSE;
- new_symndx = is_gd ? ELF64_R_SYM (irel->r_info) : 0;
- switch (!dynamic && !info->link_info->shared)
- {
- case 1:
- {
- bfd_vma tp_base;
- bfd_signed_vma disp;
-
- BFD_ASSERT (elf_hash_table (info->link_info)->tls_sec != NULL);
- tp_base = alpha_get_tprel_base (info->link_info);
- disp = symval - tp_base;
-
- if (disp >= -0x8000 && disp < 0x8000)
- {
- insn = (OP_LDA << 26) | (16 << 21) | (31 << 16);
- bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
- bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[1]);
-
- irel[0].r_offset = pos[0] - info->contents;
- irel[0].r_info = ELF64_R_INFO (new_symndx, R_ALPHA_TPREL16);
- irel[1].r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
- break;
- }
- else if (disp >= -(bfd_signed_vma) 0x80000000
- && disp < (bfd_signed_vma) 0x7fff8000
- && !pos1_unusable)
- {
- insn = (OP_LDAH << 26) | (16 << 21) | (31 << 16);
- bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
- insn = (OP_LDA << 26) | (16 << 21) | (16 << 16);
- bfd_put_32 (info->abfd, (bfd_vma) insn, pos[1]);
-
- irel[0].r_offset = pos[0] - info->contents;
- irel[0].r_info = ELF64_R_INFO (new_symndx, R_ALPHA_TPRELHI);
- irel[1].r_offset = pos[1] - info->contents;
- irel[1].r_info = ELF64_R_INFO (new_symndx, R_ALPHA_TPRELLO);
- break;
- }
- }
- /* FALLTHRU */
-
- default:
- use_gottprel = TRUE;
-
- insn = (OP_LDQ << 26) | (16 << 21) | (29 << 16);
- bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
- bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[1]);
-
- irel[0].r_offset = pos[0] - info->contents;
- irel[0].r_info = ELF64_R_INFO (new_symndx, R_ALPHA_GOTTPREL);
- irel[1].r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
- break;
- }
-
- bfd_put_32 (info->abfd, (bfd_vma) INSN_RDUNIQ, pos[2]);
-
- insn = INSN_ADDQ | (16 << 21) | (0 << 16) | (0 << 0);
- bfd_put_32 (info->abfd, (bfd_vma) insn, pos[3]);
-
- bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[4]);
-
- irel[2].r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
- gpdisp->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
-
- hint = elf64_alpha_find_reloc_at_ofs (info->relocs, info->relend,
- irel[2].r_offset, R_ALPHA_HINT);
- if (hint)
- hint->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
-
- info->changed_contents = TRUE;
- info->changed_relocs = TRUE;
-
- /* Reduce the use count on the TLSGD/TLSLDM relocation. */
- if (--info->gotent->use_count == 0)
- {
- int sz = alpha_got_entry_size (info->gotent->reloc_type);
- alpha_elf_tdata (info->gotobj)->total_got_size -= sz;
- if (!info->h)
- alpha_elf_tdata (info->gotobj)->local_got_size -= sz;
- }
-
- /* If we've switched to a GOTTPREL relocation, increment the reference
- count on that got entry. */
- if (use_gottprel)
- {
- struct alpha_elf_got_entry *tprel_gotent;
-
- for (tprel_gotent = *info->first_gotent; tprel_gotent ;
- tprel_gotent = tprel_gotent->next)
- if (tprel_gotent->gotobj == info->gotobj
- && tprel_gotent->reloc_type == R_ALPHA_GOTTPREL
- && tprel_gotent->addend == irel->r_addend)
- break;
- if (tprel_gotent)
- tprel_gotent->use_count++;
- else
- {
- if (info->gotent->use_count == 0)
- tprel_gotent = info->gotent;
- else
- {
- tprel_gotent = (struct alpha_elf_got_entry *)
- bfd_alloc (info->abfd, sizeof (struct alpha_elf_got_entry));
- if (!tprel_gotent)
- return FALSE;
-
- tprel_gotent->next = *info->first_gotent;
- *info->first_gotent = tprel_gotent;
-
- tprel_gotent->gotobj = info->gotobj;
- tprel_gotent->addend = irel->r_addend;
- tprel_gotent->got_offset = -1;
- tprel_gotent->reloc_done = 0;
- tprel_gotent->reloc_xlated = 0;
- }
-
- tprel_gotent->use_count = 1;
- tprel_gotent->reloc_type = R_ALPHA_GOTTPREL;
- }
- }
-
- return TRUE;
-}
-
-static bfd_boolean
-elf64_alpha_relax_section (abfd, sec, link_info, again)
- bfd *abfd;
- asection *sec;
- struct bfd_link_info *link_info;
- bfd_boolean *again;
-{
- Elf_Internal_Shdr *symtab_hdr;
- Elf_Internal_Rela *internal_relocs;
- Elf_Internal_Rela *irel, *irelend;
- Elf_Internal_Sym *isymbuf = NULL;
- struct alpha_elf_got_entry **local_got_entries;
- struct alpha_relax_info info;
-
- /* We are not currently changing any sizes, so only one pass. */
- *again = FALSE;
-
- if (link_info->relocatable
- || ((sec->flags & (SEC_CODE | SEC_RELOC | SEC_ALLOC))
- != (SEC_CODE | SEC_RELOC | SEC_ALLOC))
- || sec->reloc_count == 0)
- return TRUE;
-
- symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
- local_got_entries = alpha_elf_tdata(abfd)->local_got_entries;
-
- /* Load the relocations for this section. */
- internal_relocs = (_bfd_elf_link_read_relocs
- (abfd, sec, (PTR) NULL, (Elf_Internal_Rela *) NULL,
- link_info->keep_memory));
- if (internal_relocs == NULL)
- return FALSE;
-
- memset(&info, 0, sizeof (info));
- info.abfd = abfd;
- info.sec = sec;
- info.link_info = link_info;
- info.symtab_hdr = symtab_hdr;
- info.relocs = internal_relocs;
- info.relend = irelend = internal_relocs + sec->reloc_count;
-
- /* Find the GP for this object. Do not store the result back via
- _bfd_set_gp_value, since this could change again before final. */
- info.gotobj = alpha_elf_tdata (abfd)->gotobj;
- if (info.gotobj)
- {
- asection *sgot = alpha_elf_tdata (info.gotobj)->got;
- info.gp = (sgot->output_section->vma
- + sgot->output_offset
- + 0x8000);
- }
-
- /* Get the section contents. */
- if (elf_section_data (sec)->this_hdr.contents != NULL)
- info.contents = elf_section_data (sec)->this_hdr.contents;
- else
- {
- if (!bfd_malloc_and_get_section (abfd, sec, &info.contents))
- goto error_return;
- }
-
- for (irel = internal_relocs; irel < irelend; irel++)
- {
- bfd_vma symval;
- struct alpha_elf_got_entry *gotent;
- unsigned long r_type = ELF64_R_TYPE (irel->r_info);
- unsigned long r_symndx = ELF64_R_SYM (irel->r_info);
-
- /* Early exit for unhandled or unrelaxable relocations. */
- switch (r_type)
- {
- case R_ALPHA_LITERAL:
- case R_ALPHA_GPRELHIGH:
- case R_ALPHA_GPRELLOW:
- case R_ALPHA_GOTDTPREL:
- case R_ALPHA_GOTTPREL:
- case R_ALPHA_TLSGD:
- break;
-
- case R_ALPHA_TLSLDM:
- /* The symbol for a TLSLDM reloc is ignored. Collapse the
- reloc to the 0 symbol so that they all match. */
- r_symndx = 0;
- break;
-
- default:
- continue;
- }
-
- /* Get the value of the symbol referred to by the reloc. */
- if (r_symndx < symtab_hdr->sh_info)
- {
- /* A local symbol. */
- Elf_Internal_Sym *isym;
-
- /* Read this BFD's local symbols. */
- if (isymbuf == NULL)
- {
- isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
- if (isymbuf == NULL)
- isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
- symtab_hdr->sh_info, 0,
- NULL, NULL, NULL);
- if (isymbuf == NULL)
- goto error_return;
- }
-
- isym = isymbuf + r_symndx;
-
- /* Given the symbol for a TLSLDM reloc is ignored, this also
- means forcing the symbol value to the tp base. */
- if (r_type == R_ALPHA_TLSLDM)
- {
- info.tsec = bfd_abs_section_ptr;
- symval = alpha_get_tprel_base (info.link_info);
- }
- else
- {
- symval = isym->st_value;
- if (isym->st_shndx == SHN_UNDEF)
- continue;
- else if (isym->st_shndx == SHN_ABS)
- info.tsec = bfd_abs_section_ptr;
- else if (isym->st_shndx == SHN_COMMON)
- info.tsec = bfd_com_section_ptr;
- else
- info.tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
- }
-
- info.h = NULL;
- info.other = isym->st_other;
- if (local_got_entries)
- info.first_gotent = &local_got_entries[r_symndx];
- else
- {
- info.first_gotent = &info.gotent;
- info.gotent = NULL;
- }
- }
- else
- {
- unsigned long indx;
- struct alpha_elf_link_hash_entry *h;
-
- indx = r_symndx - symtab_hdr->sh_info;
- h = alpha_elf_sym_hashes (abfd)[indx];
- BFD_ASSERT (h != NULL);
-
- while (h->root.root.type == bfd_link_hash_indirect
- || h->root.root.type == bfd_link_hash_warning)
- h = (struct alpha_elf_link_hash_entry *)h->root.root.u.i.link;
-
- /* If the symbol is undefined, we can't do anything with it. */
- if (h->root.root.type == bfd_link_hash_undefined)
- continue;
-
- /* If the symbol isn't defined in the current module,
- again we can't do anything. */
- if (h->root.root.type == bfd_link_hash_undefweak)
- {
- info.tsec = bfd_abs_section_ptr;
- symval = 0;
- }
- else if (!h->root.def_regular)
- {
- /* Except for TLSGD relocs, which can sometimes be
- relaxed to GOTTPREL relocs. */
- if (r_type != R_ALPHA_TLSGD)
- continue;
- info.tsec = bfd_abs_section_ptr;
- symval = 0;
- }
- else
- {
- info.tsec = h->root.root.u.def.section;
- symval = h->root.root.u.def.value;
- }
-
- info.h = h;
- info.other = h->root.other;
- info.first_gotent = &h->got_entries;
- }
-
- /* Search for the got entry to be used by this relocation. */
- for (gotent = *info.first_gotent; gotent ; gotent = gotent->next)
- if (gotent->gotobj == info.gotobj
- && gotent->reloc_type == r_type
- && gotent->addend == irel->r_addend)
- break;
- info.gotent = gotent;
-
- symval += info.tsec->output_section->vma + info.tsec->output_offset;
- symval += irel->r_addend;
-
- switch (r_type)
- {
- case R_ALPHA_LITERAL:
- BFD_ASSERT(info.gotent != NULL);
-
- /* If there exist LITUSE relocations immediately following, this
- opens up all sorts of interesting optimizations, because we
- now know every location that this address load is used. */
- if (irel+1 < irelend
- && ELF64_R_TYPE (irel[1].r_info) == R_ALPHA_LITUSE)
- {
- if (!elf64_alpha_relax_with_lituse (&info, symval, irel))
- goto error_return;
- }
- else
- {
- if (!elf64_alpha_relax_got_load (&info, symval, irel, r_type))
- goto error_return;
- }
- break;
-
- case R_ALPHA_GPRELHIGH:
- case R_ALPHA_GPRELLOW:
- if (!elf64_alpha_relax_gprelhilo (&info, symval, irel,
- r_type == R_ALPHA_GPRELHIGH))
- goto error_return;
- break;
-
- case R_ALPHA_GOTDTPREL:
- case R_ALPHA_GOTTPREL:
- BFD_ASSERT(info.gotent != NULL);
- if (!elf64_alpha_relax_got_load (&info, symval, irel, r_type))
- goto error_return;
- break;
-
- case R_ALPHA_TLSGD:
- case R_ALPHA_TLSLDM:
- BFD_ASSERT(info.gotent != NULL);
- if (!elf64_alpha_relax_tls_get_addr (&info, symval, irel,
- r_type == R_ALPHA_TLSGD))
- goto error_return;
- break;
- }
- }
-
- if (!elf64_alpha_size_plt_section (link_info))
- return FALSE;
- if (!elf64_alpha_size_got_sections (link_info))
- return FALSE;
- if (!elf64_alpha_size_rela_got_section (link_info))
- return FALSE;
-
- if (isymbuf != NULL
- && symtab_hdr->contents != (unsigned char *) isymbuf)
- {
- if (!link_info->keep_memory)
- free (isymbuf);
- else
- {
- /* Cache the symbols for elf_link_input_bfd. */
- symtab_hdr->contents = (unsigned char *) isymbuf;
- }
- }
-
- if (info.contents != NULL
- && elf_section_data (sec)->this_hdr.contents != info.contents)
- {
- if (!info.changed_contents && !link_info->keep_memory)
- free (info.contents);
- else
- {
- /* Cache the section contents for elf_link_input_bfd. */
- elf_section_data (sec)->this_hdr.contents = info.contents;
- }
- }
-
- if (elf_section_data (sec)->relocs != internal_relocs)
- {
- if (!info.changed_relocs)
- free (internal_relocs);
- else
- elf_section_data (sec)->relocs = internal_relocs;
- }
-
- *again = info.changed_contents || info.changed_relocs;
-
- return TRUE;
-
- error_return:
- if (isymbuf != NULL
- && symtab_hdr->contents != (unsigned char *) isymbuf)
- free (isymbuf);
- if (info.contents != NULL
- && elf_section_data (sec)->this_hdr.contents != info.contents)
- free (info.contents);
- if (internal_relocs != NULL
- && elf_section_data (sec)->relocs != internal_relocs)
- free (internal_relocs);
- return FALSE;
-}
-
/* PLT/GOT Stuff */
#define PLT_HEADER_SIZE 32
#define PLT_HEADER_WORD1 (bfd_vma) 0xc3600000 /* br $27,.+4 */
@@ -2333,9 +1088,7 @@ elf64_alpha_section_from_shdr (bfd *abfd,
/* Convert Alpha specific section flags to bfd internal section flags. */
static bfd_boolean
-elf64_alpha_section_flags (flags, hdr)
- flagword *flags;
- const Elf_Internal_Shdr *hdr;
+elf64_alpha_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
{
if (hdr->sh_flags & SHF_ALPHA_GPREL)
*flags |= SEC_SMALL_DATA;
@@ -2347,10 +1100,7 @@ elf64_alpha_section_flags (flags, hdr)
section name, which is a hack, but ought to work. */
static bfd_boolean
-elf64_alpha_fake_sections (abfd, hdr, sec)
- bfd *abfd;
- Elf_Internal_Shdr *hdr;
- asection *sec;
+elf64_alpha_fake_sections (bfd *abfd, Elf_Internal_Shdr *hdr, asection *sec)
{
register const char *name;
@@ -2380,14 +1130,11 @@ elf64_alpha_fake_sections (abfd, hdr, sec)
file. We use it to put .comm items in .sbss, and not .bss. */
static bfd_boolean
-elf64_alpha_add_symbol_hook (abfd, info, sym, namep, flagsp, secp, valp)
- bfd *abfd;
- struct bfd_link_info *info;
- Elf_Internal_Sym *sym;
- const char **namep ATTRIBUTE_UNUSED;
- flagword *flagsp ATTRIBUTE_UNUSED;
- asection **secp;
- bfd_vma *valp;
+elf64_alpha_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
+ Elf_Internal_Sym *sym,
+ const char **namep ATTRIBUTE_UNUSED,
+ flagword *flagsp ATTRIBUTE_UNUSED,
+ asection **secp, bfd_vma *valp)
{
if (sym->st_shndx == SHN_COMMON
&& !info->relocatable
@@ -2418,9 +1165,8 @@ elf64_alpha_add_symbol_hook (abfd, info, sym, namep, flagsp, secp, valp)
/* Create the .got section. */
static bfd_boolean
-elf64_alpha_create_got_section(abfd, info)
- bfd *abfd;
- struct bfd_link_info *info ATTRIBUTE_UNUSED;
+elf64_alpha_create_got_section (bfd *abfd,
+ struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
asection *s;
@@ -2448,9 +1194,7 @@ elf64_alpha_create_got_section(abfd, info)
/* Create all the dynamic sections. */
static bfd_boolean
-elf64_alpha_create_dynamic_sections (abfd, info)
- bfd *abfd;
- struct bfd_link_info *info;
+elf64_alpha_create_dynamic_sections (bfd *abfd, struct bfd_link_info *info)
{
asection *s;
struct elf_link_hash_entry *h;
@@ -2537,10 +1281,8 @@ elf64_alpha_create_dynamic_sections (abfd, info)
ecoff_debug_info structure. */
static bfd_boolean
-elf64_alpha_read_ecoff_info (abfd, section, debug)
- bfd *abfd;
- asection *section;
- struct ecoff_debug_info *debug;
+elf64_alpha_read_ecoff_info (bfd *abfd, asection *section,
+ struct ecoff_debug_info *debug)
{
HDRR *symhdr;
const struct ecoff_debug_swap *swap;
@@ -2625,9 +1367,7 @@ elf64_alpha_read_ecoff_info (abfd, section, debug)
/* Alpha ELF local labels start with '$'. */
static bfd_boolean
-elf64_alpha_is_local_label_name (abfd, name)
- bfd *abfd ATTRIBUTE_UNUSED;
- const char *name;
+elf64_alpha_is_local_label_name (bfd *abfd ATTRIBUTE_UNUSED, const char *name)
{
return name[0] == '$';
}
@@ -2644,15 +1384,10 @@ struct mips_elf_find_line
};
static bfd_boolean
-elf64_alpha_find_nearest_line (abfd, section, symbols, offset, filename_ptr,
- functionname_ptr, line_ptr)
- bfd *abfd;
- asection *section;
- asymbol **symbols;
- bfd_vma offset;
- const char **filename_ptr;
- const char **functionname_ptr;
- unsigned int *line_ptr;
+elf64_alpha_find_nearest_line (bfd *abfd, asection *section, asymbol **symbols,
+ bfd_vma offset, const char **filename_ptr,
+ const char **functionname_ptr,
+ unsigned int *line_ptr)
{
asection *msec;
@@ -2755,9 +1490,7 @@ struct extsym_info
};
static bfd_boolean
-elf64_alpha_output_extsym (h, data)
- struct alpha_elf_link_hash_entry *h;
- PTR data;
+elf64_alpha_output_extsym (struct alpha_elf_link_hash_entry *h, PTR data)
{
struct extsym_info *einfo = (struct extsym_info *) data;
bfd_boolean strip;
@@ -2892,11 +1625,9 @@ elf64_alpha_output_extsym (h, data)
/* Search for and possibly create a got entry. */
static struct alpha_elf_got_entry *
-get_got_entry (abfd, h, r_type, r_symndx, r_addend)
- bfd *abfd;
- struct alpha_elf_link_hash_entry *h;
- unsigned long r_type, r_symndx;
- bfd_vma r_addend;
+get_got_entry (bfd *abfd, struct alpha_elf_link_hash_entry *h,
+ unsigned long r_type, unsigned long r_symndx,
+ bfd_vma r_addend)
{
struct alpha_elf_got_entry *gotent;
struct alpha_elf_got_entry **slot;
@@ -2971,11 +1702,8 @@ get_got_entry (abfd, h, r_type, r_symndx, r_addend)
/* Handle dynamic relocations when doing an Alpha ELF link. */
static bfd_boolean
-elf64_alpha_check_relocs (abfd, info, sec, relocs)
- bfd *abfd;
- struct bfd_link_info *info;
- asection *sec;
- const Elf_Internal_Rela *relocs;
+elf64_alpha_check_relocs (bfd *abfd, struct bfd_link_info *info,
+ asection *sec, const Elf_Internal_Rela *relocs)
{
bfd *dynobj;
asection *sreloc;
@@ -3044,7 +1772,8 @@ elf64_alpha_check_relocs (abfd, info, sec, relocs)
this may help reduce memory usage and processing time later. */
maybe_dynamic = FALSE;
if (h && ((info->shared
- && (!info->symbolic || info->unresolved_syms_in_shared_libs == RM_IGNORE))
+ && (!info->symbolic
+ || info->unresolved_syms_in_shared_libs == RM_IGNORE))
|| !h->root.def_regular
|| h->root.root.type == bfd_link_hash_defweak))
maybe_dynamic = TRUE;
@@ -3250,9 +1979,8 @@ elf64_alpha_check_relocs (abfd, info, sec, relocs)
understand. */
static bfd_boolean
-elf64_alpha_adjust_dynamic_symbol (info, h)
- struct bfd_link_info *info;
- struct elf_link_hash_entry *h;
+elf64_alpha_adjust_dynamic_symbol (struct bfd_link_info *info,
+ struct elf_link_hash_entry *h)
{
bfd *dynobj;
asection *s;
@@ -3338,9 +2066,8 @@ elf64_alpha_adjust_dynamic_symbol (info, h)
in these situations. */
static bfd_boolean
-elf64_alpha_merge_ind_symbols (hi, dummy)
- struct alpha_elf_link_hash_entry *hi;
- PTR dummy ATTRIBUTE_UNUSED;
+elf64_alpha_merge_ind_symbols (struct alpha_elf_link_hash_entry *hi,
+ PTR dummy ATTRIBUTE_UNUSED)
{
struct alpha_elf_link_hash_entry *hs;
@@ -3414,8 +2141,7 @@ elf64_alpha_merge_ind_symbols (hi, dummy)
/* Is it possible to merge two object file's .got tables? */
static bfd_boolean
-elf64_alpha_can_merge_gots (a, b)
- bfd *a, *b;
+elf64_alpha_can_merge_gots (bfd *a, bfd *b)
{
int total = alpha_elf_tdata (a)->total_got_size;
bfd *bsub;
@@ -3475,8 +2201,7 @@ elf64_alpha_can_merge_gots (a, b)
/* Actually merge two .got tables. */
static void
-elf64_alpha_merge_gots (a, b)
- bfd *a, *b;
+elf64_alpha_merge_gots (bfd *a, bfd *b)
{
int total = alpha_elf_tdata (a)->total_got_size;
bfd *bsub;
@@ -3574,9 +2299,8 @@ elf64_alpha_merge_gots (a, b)
/* Calculate the offsets for the got entries. */
static bfd_boolean
-elf64_alpha_calc_got_offsets_for_symbol (h, arg)
- struct alpha_elf_link_hash_entry *h;
- PTR arg ATTRIBUTE_UNUSED;
+elf64_alpha_calc_got_offsets_for_symbol (struct alpha_elf_link_hash_entry *h,
+ PTR arg ATTRIBUTE_UNUSED)
{
bfd_boolean result = TRUE;
struct alpha_elf_got_entry *gotent;
@@ -3607,8 +2331,7 @@ elf64_alpha_calc_got_offsets_for_symbol (h, arg)
}
static void
-elf64_alpha_calc_got_offsets (info)
- struct bfd_link_info *info;
+elf64_alpha_calc_got_offsets (struct bfd_link_info *info)
{
bfd *i, *got_list = alpha_elf_hash_table(info)->got_list;
@@ -3653,8 +2376,7 @@ elf64_alpha_calc_got_offsets (info)
/* Constructs the gots. */
static bfd_boolean
-elf64_alpha_size_got_sections (info)
- struct bfd_link_info *info;
+elf64_alpha_size_got_sections (struct bfd_link_info *info)
{
bfd *i, *got_list, *cur_got_obj = NULL;
int something_changed = 0;
@@ -3729,42 +2451,8 @@ elf64_alpha_size_got_sections (info)
return TRUE;
}
-/* Called from relax_section to rebuild the PLT in light of
- potential changes in the function's status. */
-
static bfd_boolean
-elf64_alpha_size_plt_section (info)
- struct bfd_link_info *info;
-{
- asection *splt, *spltrel;
- unsigned long entries;
- bfd *dynobj;
-
- dynobj = elf_hash_table(info)->dynobj;
- splt = bfd_get_section_by_name(dynobj, ".plt");
- if (splt == NULL)
- return TRUE;
-
- splt->size = 0;
-
- alpha_elf_link_hash_traverse (alpha_elf_hash_table (info),
- elf64_alpha_size_plt_section_1, splt);
-
- /* Every plt entry requires a JMP_SLOT relocation. */
- spltrel = bfd_get_section_by_name (dynobj, ".rela.plt");
- if (splt->size)
- entries = (splt->size - PLT_HEADER_SIZE) / PLT_ENTRY_SIZE;
- else
- entries = 0;
- spltrel->size = entries * sizeof (Elf64_External_Rela);
-
- return TRUE;
-}
-
-static bfd_boolean
-elf64_alpha_size_plt_section_1 (h, data)
- struct alpha_elf_link_hash_entry *h;
- PTR data;
+elf64_alpha_size_plt_section_1 (struct alpha_elf_link_hash_entry *h, PTR data)
{
asection *splt = (asection *) data;
struct alpha_elf_got_entry *gotent;
@@ -3805,10 +2493,40 @@ elf64_alpha_size_plt_section_1 (h, data)
return TRUE;
}
+/* Called from relax_section to rebuild the PLT in light of
+ potential changes in the function's status. */
+
static bfd_boolean
-elf64_alpha_always_size_sections (output_bfd, info)
- bfd *output_bfd ATTRIBUTE_UNUSED;
- struct bfd_link_info *info;
+elf64_alpha_size_plt_section (struct bfd_link_info *info)
+{
+ asection *splt, *spltrel;
+ unsigned long entries;
+ bfd *dynobj;
+
+ dynobj = elf_hash_table(info)->dynobj;
+ splt = bfd_get_section_by_name(dynobj, ".plt");
+ if (splt == NULL)
+ return TRUE;
+
+ splt->size = 0;
+
+ alpha_elf_link_hash_traverse (alpha_elf_hash_table (info),
+ elf64_alpha_size_plt_section_1, splt);
+
+ /* Every plt entry requires a JMP_SLOT relocation. */
+ spltrel = bfd_get_section_by_name (dynobj, ".rela.plt");
+ if (splt->size)
+ entries = (splt->size - PLT_HEADER_SIZE) / PLT_ENTRY_SIZE;
+ else
+ entries = 0;
+ spltrel->size = entries * sizeof (Elf64_External_Rela);
+
+ return TRUE;
+}
+
+static bfd_boolean
+elf64_alpha_always_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info)
{
bfd *i;
@@ -3842,8 +2560,7 @@ elf64_alpha_always_size_sections (output_bfd, info)
/* The number of dynamic relocations required by a static relocation. */
static int
-alpha_dynamic_entries_for_reloc (r_type, dynamic, shared)
- int r_type, dynamic, shared;
+alpha_dynamic_entries_for_reloc (int r_type, int dynamic, int shared)
{
switch (r_type)
{
@@ -3874,9 +2591,8 @@ alpha_dynamic_entries_for_reloc (r_type, dynamic, shared)
/* Work out the sizes of the dynamic relocation entries. */
static bfd_boolean
-elf64_alpha_calc_dynrel_sizes (h, info)
- struct alpha_elf_link_hash_entry *h;
- struct bfd_link_info *info;
+elf64_alpha_calc_dynrel_sizes (struct alpha_elf_link_hash_entry *h,
+ struct bfd_link_info *info)
{
bfd_boolean dynamic;
struct alpha_elf_reloc_entry *relent;
@@ -3927,11 +2643,57 @@ elf64_alpha_calc_dynrel_sizes (h, info)
return TRUE;
}
+/* Subroutine of elf64_alpha_size_rela_got_section for doing the
+ global symbols. */
+
+static bfd_boolean
+elf64_alpha_size_rela_got_1 (struct alpha_elf_link_hash_entry *h,
+ struct bfd_link_info *info)
+{
+ bfd_boolean dynamic;
+ struct alpha_elf_got_entry *gotent;
+ unsigned long entries;
+
+ if (h->root.root.type == bfd_link_hash_warning)
+ h = (struct alpha_elf_link_hash_entry *) h->root.root.u.i.link;
+
+ /* If the symbol is dynamic, we'll need all the relocations in their
+ natural form. If this is a shared object, and it has been forced
+ local, we'll need the same number of RELATIVE relocations. */
+ dynamic = alpha_elf_dynamic_symbol_p (&h->root, info);
+
+ /* If the symbol is a hidden undefined weak, then we never have any
+ relocations. Avoid the loop which may want to add RELATIVE relocs
+ based on info->shared. */
+ if (h->root.root.type == bfd_link_hash_undefweak && !dynamic)
+ return TRUE;
+
+ entries = 0;
+ for (gotent = h->got_entries; gotent ; gotent = gotent->next)
+ if (gotent->use_count > 0)
+ entries += alpha_dynamic_entries_for_reloc (gotent->reloc_type,
+ dynamic, info->shared);
+
+ /* If we are using a .plt entry, subtract one, as the first
+ reference uses a .rela.plt entry instead. */
+ if (h->root.plt.offset != MINUS_ONE)
+ entries--;
+
+ if (entries > 0)
+ {
+ bfd *dynobj = elf_hash_table(info)->dynobj;
+ asection *srel = bfd_get_section_by_name (dynobj, ".rela.got");
+ BFD_ASSERT (srel != NULL);
+ srel->size += sizeof (Elf64_External_Rela) * entries;
+ }
+
+ return TRUE;
+}
+
/* Set the sizes of the dynamic relocation sections. */
static bfd_boolean
-elf64_alpha_size_rela_got_section (info)
- struct bfd_link_info *info;
+elf64_alpha_size_rela_got_section (struct bfd_link_info *info)
{
unsigned long entries;
bfd *i, *dynobj;
@@ -3980,60 +2742,11 @@ elf64_alpha_size_rela_got_section (info)
return TRUE;
}
-/* Subroutine of elf64_alpha_size_rela_got_section for doing the
- global symbols. */
-
-static bfd_boolean
-elf64_alpha_size_rela_got_1 (h, info)
- struct alpha_elf_link_hash_entry *h;
- struct bfd_link_info *info;
-{
- bfd_boolean dynamic;
- struct alpha_elf_got_entry *gotent;
- unsigned long entries;
-
- if (h->root.root.type == bfd_link_hash_warning)
- h = (struct alpha_elf_link_hash_entry *) h->root.root.u.i.link;
-
- /* If the symbol is dynamic, we'll need all the relocations in their
- natural form. If this is a shared object, and it has been forced
- local, we'll need the same number of RELATIVE relocations. */
- dynamic = alpha_elf_dynamic_symbol_p (&h->root, info);
-
- /* If the symbol is a hidden undefined weak, then we never have any
- relocations. Avoid the loop which may want to add RELATIVE relocs
- based on info->shared. */
- if (h->root.root.type == bfd_link_hash_undefweak && !dynamic)
- return TRUE;
-
- entries = 0;
- for (gotent = h->got_entries; gotent ; gotent = gotent->next)
- if (gotent->use_count > 0)
- entries += alpha_dynamic_entries_for_reloc (gotent->reloc_type,
- dynamic, info->shared);
-
- /* If we are using a .plt entry, subtract one, as the first
- reference uses a .rela.plt entry instead. */
- if (h->root.plt.offset != MINUS_ONE)
- entries--;
-
- if (entries > 0)
- {
- bfd *dynobj = elf_hash_table(info)->dynobj;
- asection *srel = bfd_get_section_by_name (dynobj, ".rela.got");
- BFD_ASSERT (srel != NULL);
- srel->size += sizeof (Elf64_External_Rela) * entries;
- }
-
- return TRUE;
-}
-
/* Set the sizes of the dynamic sections. */
static bfd_boolean
-elf64_alpha_size_dynamic_sections (output_bfd, info)
- bfd *output_bfd ATTRIBUTE_UNUSED;
- struct bfd_link_info *info;
+elf64_alpha_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info)
{
bfd *dynobj;
asection *s;
@@ -4161,17 +2874,1031 @@ elf64_alpha_size_dynamic_sections (output_bfd, info)
return TRUE;
}
+
+/* These functions do relaxation for Alpha ELF.
+
+ Currently I'm only handling what I can do with existing compiler
+ and assembler support, which means no instructions are removed,
+ though some may be nopped. At this time GCC does not emit enough
+ information to do all of the relaxing that is possible. It will
+ take no small amount of work for that to happen.
+
+ There are a couple of interesting papers on this subject, relating
+ to Alpha in particular, that I once read but cannot find references
+ to at the moment. They are by David Wall, then of
+ DEC WRL. */
+
+#define OP_LDA 0x08
+#define OP_LDAH 0x09
+#define INSN_JSR 0x68004000
+#define INSN_JSR_MASK 0xfc00c000
+#define OP_LDQ 0x29
+#define OP_BR 0x30
+#define OP_BSR 0x34
+#define INSN_UNOP 0x2ffe0000
+#define INSN_ADDQ 0x40000400
+#define INSN_RDUNIQ 0x0000009e
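+
+/* For illustration: the OP_* values above are 6-bit major opcodes destined
+ for bits <31:26> of an instruction word, while the INSN_* values are
+ complete or partial 32-bit encodings. Alpha memory-format instructions
+ pack Ra into bits <25:21>, Rb into <20:16> and a signed 16-bit
+ displacement into <15:0>, so the relaxation code below can compose, for
+ example, "lda $16,disp($31)" as
+
+ (OP_LDA << 26) | (16 << 21) | (31 << 16) | (disp & 0xffff). */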
+
+struct alpha_relax_info
+{
+ bfd *abfd;
+ asection *sec;
+ bfd_byte *contents;
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Rela *relocs, *relend;
+ struct bfd_link_info *link_info;
+ bfd_vma gp;
+ bfd *gotobj;
+ asection *tsec;
+ struct alpha_elf_link_hash_entry *h;
+ struct alpha_elf_got_entry **first_gotent;
+ struct alpha_elf_got_entry *gotent;
+ bfd_boolean changed_contents;
+ bfd_boolean changed_relocs;
+ unsigned char other;
+};
+
+static Elf_Internal_Rela *
+elf64_alpha_find_reloc_at_ofs (Elf_Internal_Rela *rel,
+ Elf_Internal_Rela *relend,
+ bfd_vma offset, int type)
+{
+ while (rel < relend)
+ {
+ if (rel->r_offset == offset
+ && ELF64_R_TYPE (rel->r_info) == (unsigned int) type)
+ return rel;
+ ++rel;
+ }
+ return NULL;
+}
+
+static bfd_boolean
+elf64_alpha_relax_got_load (struct alpha_relax_info *info, bfd_vma symval,
+ Elf_Internal_Rela *irel, unsigned long r_type)
+{
+ unsigned int insn;
+ bfd_signed_vma disp;
+
+ /* Get the instruction. */
+ insn = bfd_get_32 (info->abfd, info->contents + irel->r_offset);
+
+ if (insn >> 26 != OP_LDQ)
+ {
+ reloc_howto_type *howto = elf64_alpha_howto_table + r_type;
+ ((*_bfd_error_handler)
+ ("%B: %A+0x%lx: warning: %s relocation against unexpected insn",
+ info->abfd, info->sec,
+ (unsigned long) irel->r_offset, howto->name));
+ return TRUE;
+ }
+
+ /* Can't relax dynamic symbols. */
+ if (alpha_elf_dynamic_symbol_p (&info->h->root, info->link_info))
+ return TRUE;
+
+ /* Can't use local-exec relocations in shared libraries. */
+ if (r_type == R_ALPHA_GOTTPREL && info->link_info->shared)
+ return TRUE;
+
+ if (r_type == R_ALPHA_LITERAL)
+ {
+ /* Look for nice constant addresses. This includes the not-uncommon
+ special case of 0 for undefweak symbols. */
+ if ((info->h && info->h->root.root.type == bfd_link_hash_undefweak)
+ || (!info->link_info->shared
+ && (symval >= (bfd_vma)-0x8000 || symval < 0x8000)))
+ {
+ disp = 0;
+ insn = (OP_LDA << 26) | (insn & (31 << 21)) | (31 << 16);
+ insn |= (symval & 0xffff);
+ r_type = R_ALPHA_NONE;
+ }
+ else
+ {
+ disp = symval - info->gp;
+ insn = (OP_LDA << 26) | (insn & 0x03ff0000);
+ r_type = R_ALPHA_GPREL16;
+ }
+ }
+ else
+ {
+ bfd_vma dtp_base, tp_base;
+
+ BFD_ASSERT (elf_hash_table (info->link_info)->tls_sec != NULL);
+ dtp_base = alpha_get_dtprel_base (info->link_info);
+ tp_base = alpha_get_tprel_base (info->link_info);
+ disp = symval - (r_type == R_ALPHA_GOTDTPREL ? dtp_base : tp_base);
+
+ insn = (OP_LDA << 26) | (insn & (31 << 21)) | (31 << 16);
+
+ switch (r_type)
+ {
+ case R_ALPHA_GOTDTPREL:
+ r_type = R_ALPHA_DTPREL16;
+ break;
+ case R_ALPHA_GOTTPREL:
+ r_type = R_ALPHA_TPREL16;
+ break;
+ default:
+ BFD_ASSERT (0);
+ return FALSE;
+ }
+ }
+
+ if (disp < -0x8000 || disp >= 0x8000)
+ return TRUE;
+
+ bfd_put_32 (info->abfd, (bfd_vma) insn, info->contents + irel->r_offset);
+ info->changed_contents = TRUE;
+
+ /* Reduce the use count on this got entry by one, possibly
+ eliminating it. */
+ if (--info->gotent->use_count == 0)
+ {
+ int sz = alpha_got_entry_size (r_type);
+ alpha_elf_tdata (info->gotobj)->total_got_size -= sz;
+ if (!info->h)
+ alpha_elf_tdata (info->gotobj)->local_got_size -= sz;
+ }
+
+ /* Smash the existing GOT relocation for its 16-bit immediate pair. */
+ irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info), r_type);
+ info->changed_relocs = TRUE;
+
+ /* ??? Search forward through this basic block looking for insns
+ that use the target register. Stop after an insn modifying the
+ register is seen, or after a branch or call.
+
+ Any such memory load insn may be substituted by a load directly
+ off the GP. This allows the memory load insn to be issued before
+ the calculated GP register would otherwise be ready.
+
+ Any such jsr insn can be replaced by a bsr if it is in range.
+
+ This would mean that we'd have to _add_ relocations, the pain of
+ which gives one pause. */
+
+ return TRUE;
+}
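+
+/* A sketch of the rewrite performed by elf64_alpha_relax_got_load above,
+ assuming the computed displacement fits in 16 bits:
+
+ ldq $r,sym($gp) !literal becomes lda $r,sym-gp($gp) !gprel16
+ ldq $r,sym($gp) !gottprel becomes lda $r,sym-tp($31) !tprel16
+
+ i.e. a memory load through the GOT is replaced by a single immediate
+ computation against $gp, the zero register or the thread pointer base. */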
+
+static bfd_vma
+elf64_alpha_relax_opt_call (struct alpha_relax_info *info, bfd_vma symval)
+{
+ /* If the function has the same gp, and we can identify that the
+ function does not use its function pointer, we can eliminate the
+ address load. */
+
+ /* If the symbol is marked NOPV, we are being told the function never
+ needs its procedure value. */
+ if ((info->other & STO_ALPHA_STD_GPLOAD) == STO_ALPHA_NOPV)
+ return symval;
+
+ /* If the symbol is marked STD_GP, we are being told the function does
+ a normal ldgp in the first two words. */
+ else if ((info->other & STO_ALPHA_STD_GPLOAD) == STO_ALPHA_STD_GPLOAD)
+ ;
+
+ /* Otherwise, we may be able to identify a GP load in the first two
+ words, which we can then skip. */
+ else
+ {
+ Elf_Internal_Rela *tsec_relocs, *tsec_relend, *tsec_free, *gpdisp;
+ bfd_vma ofs;
+
+ /* Load the relocations from the section that the target symbol is in. */
+ if (info->sec == info->tsec)
+ {
+ tsec_relocs = info->relocs;
+ tsec_relend = info->relend;
+ tsec_free = NULL;
+ }
+ else
+ {
+ tsec_relocs = (_bfd_elf_link_read_relocs
+ (info->abfd, info->tsec, (PTR) NULL,
+ (Elf_Internal_Rela *) NULL,
+ info->link_info->keep_memory));
+ if (tsec_relocs == NULL)
+ return 0;
+ tsec_relend = tsec_relocs + info->tsec->reloc_count;
+ tsec_free = (info->link_info->keep_memory ? NULL : tsec_relocs);
+ }
+
+ /* Recover the symbol's offset within the section. */
+ ofs = (symval - info->tsec->output_section->vma
+ - info->tsec->output_offset);
+
+ /* Look for a GPDISP reloc. */
+ gpdisp = (elf64_alpha_find_reloc_at_ofs
+ (tsec_relocs, tsec_relend, ofs, R_ALPHA_GPDISP));
+
+ if (!gpdisp || gpdisp->r_addend != 4)
+ {
+ if (tsec_free)
+ free (tsec_free);
+ return 0;
+ }
+ if (tsec_free)
+ free (tsec_free);
+ }
+
+ /* We've now determined that we can skip an initial gp load. Verify
+ that the call and the target use the same gp. */
+ if (info->link_info->hash->creator != info->tsec->owner->xvec
+ || info->gotobj != alpha_elf_tdata (info->tsec->owner)->gotobj)
+ return 0;
+
+ return symval + 8;
+}
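+
+/* For reference, the two words skipped by "symval + 8" above are the
+ standard ldgp prologue that recomputes $gp from the procedure value:
+
+ ldah $29,0($27) !gpdisp!N
+ lda $29,0($29) !gpdisp!N
+
+ which is exactly the GPDISP pair (r_addend == 4) that the code above
+ looks for. */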
+
+static bfd_boolean
+elf64_alpha_relax_with_lituse (struct alpha_relax_info *info,
+ bfd_vma symval, Elf_Internal_Rela *irel)
+{
+ Elf_Internal_Rela *urel, *irelend = info->relend;
+ int flags, count, i;
+ bfd_signed_vma disp;
+ bfd_boolean fits16;
+ bfd_boolean fits32;
+ bfd_boolean lit_reused = FALSE;
+ bfd_boolean all_optimized = TRUE;
+ unsigned int lit_insn;
+
+ lit_insn = bfd_get_32 (info->abfd, info->contents + irel->r_offset);
+ if (lit_insn >> 26 != OP_LDQ)
+ {
+ ((*_bfd_error_handler)
+ ("%B: %A+0x%lx: warning: LITERAL relocation against unexpected insn",
+ info->abfd, info->sec,
+ (unsigned long) irel->r_offset));
+ return TRUE;
+ }
+
+ /* Can't relax dynamic symbols. */
+ if (alpha_elf_dynamic_symbol_p (&info->h->root, info->link_info))
+ return TRUE;
+
+ /* Summarize how this particular LITERAL is used. */
+ for (urel = irel+1, flags = count = 0; urel < irelend; ++urel, ++count)
+ {
+ if (ELF64_R_TYPE (urel->r_info) != R_ALPHA_LITUSE)
+ break;
+ if (urel->r_addend <= 3)
+ flags |= 1 << urel->r_addend;
+ }
+
+ /* A little preparation for the loop... */
+ disp = symval - info->gp;
+
+ for (urel = irel+1, i = 0; i < count; ++i, ++urel)
+ {
+ unsigned int insn;
+ int insn_disp;
+ bfd_signed_vma xdisp;
+
+ insn = bfd_get_32 (info->abfd, info->contents + urel->r_offset);
+
+ switch (urel->r_addend)
+ {
+ case LITUSE_ALPHA_ADDR:
+ default:
+ /* This type is really just a placeholder to note that not all
+ uses can be optimized, while still allowing some. */
+ all_optimized = FALSE;
+ break;
+
+ case LITUSE_ALPHA_BASE:
+ /* We can always optimize 16-bit displacements. */
+
+ /* Extract the displacement from the instruction, sign-extending
+ it if necessary, then test whether it is within 16 or 32 bits
+ displacement from GP. */
+ insn_disp = ((insn & 0xffff) ^ 0x8000) - 0x8000;
+
+ xdisp = disp + insn_disp;
+ fits16 = (xdisp >= - (bfd_signed_vma) 0x8000 && xdisp < 0x8000);
+ fits32 = (xdisp >= - (bfd_signed_vma) 0x80000000
+ && xdisp < 0x7fff8000);
+
+ if (fits16)
+ {
+ /* Take the op code and dest from this insn, take the base
+ register from the literal insn. Leave the offset alone. */
+ insn = (insn & 0xffe0ffff) | (lit_insn & 0x001f0000);
+ urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
+ R_ALPHA_GPREL16);
+ urel->r_addend = irel->r_addend;
+ info->changed_relocs = TRUE;
+
+ bfd_put_32 (info->abfd, (bfd_vma) insn,
+ info->contents + urel->r_offset);
+ info->changed_contents = TRUE;
+ }
+
+ /* If all mem+byte, we can optimize 32-bit mem displacements. */
+ else if (fits32 && !(flags & ~6))
+ {
+ /* FIXME: sanity check that lit insn Ra is mem insn Rb. */
+
+ irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
+ R_ALPHA_GPRELHIGH);
+ lit_insn = (OP_LDAH << 26) | (lit_insn & 0x03ff0000);
+ bfd_put_32 (info->abfd, (bfd_vma) lit_insn,
+ info->contents + irel->r_offset);
+ lit_reused = TRUE;
+ info->changed_contents = TRUE;
+
+ urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
+ R_ALPHA_GPRELLOW);
+ urel->r_addend = irel->r_addend;
+ info->changed_relocs = TRUE;
+ }
+ else
+ all_optimized = FALSE;
+ break;
+
+ case LITUSE_ALPHA_BYTOFF:
+ /* We can always optimize byte instructions. */
+
+ /* FIXME: sanity check the insn for byte op. Check that the
+ literal dest reg is indeed Rb in the byte insn. */
+
+ insn &= ~ (unsigned) 0x001ff000;
+ insn |= ((symval & 7) << 13) | 0x1000;
+
+ urel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+ urel->r_addend = 0;
+ info->changed_relocs = TRUE;
+
+ bfd_put_32 (info->abfd, (bfd_vma) insn,
+ info->contents + urel->r_offset);
+ info->changed_contents = TRUE;
+ break;
+
+ case LITUSE_ALPHA_JSR:
+ case LITUSE_ALPHA_TLSGD:
+ case LITUSE_ALPHA_TLSLDM:
+ {
+ bfd_vma optdest, org;
+ bfd_signed_vma odisp;
+
+ /* For undefined weak symbols, we're mostly interested in getting
+ rid of the got entry whenever possible, so optimize this to a
+ use of the zero register. */
+ if (info->h && info->h->root.root.type == bfd_link_hash_undefweak)
+ {
+ insn |= 31 << 16;
+ bfd_put_32 (info->abfd, (bfd_vma) insn,
+ info->contents + urel->r_offset);
+
+ info->changed_contents = TRUE;
+ break;
+ }
+
+ /* If non-zero, this is a place we can jump to without needing pv. */
+ optdest = elf64_alpha_relax_opt_call (info, symval);
+ org = (info->sec->output_section->vma
+ + info->sec->output_offset
+ + urel->r_offset + 4);
+ odisp = (optdest ? optdest : symval) - org;
+
+ if (odisp >= -0x400000 && odisp < 0x400000)
+ {
+ Elf_Internal_Rela *xrel;
+
+ /* Preserve branch prediction call stack when possible. */
+ if ((insn & INSN_JSR_MASK) == INSN_JSR)
+ insn = (OP_BSR << 26) | (insn & 0x03e00000);
+ else
+ insn = (OP_BR << 26) | (insn & 0x03e00000);
+
+ urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
+ R_ALPHA_BRADDR);
+ urel->r_addend = irel->r_addend;
+
+ if (optdest)
+ urel->r_addend += optdest - symval;
+ else
+ all_optimized = FALSE;
+
+ bfd_put_32 (info->abfd, (bfd_vma) insn,
+ info->contents + urel->r_offset);
+
+ /* Kill any HINT reloc that might exist for this insn. */
+ xrel = (elf64_alpha_find_reloc_at_ofs
+ (info->relocs, info->relend, urel->r_offset,
+ R_ALPHA_HINT));
+ if (xrel)
+ xrel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+
+ info->changed_contents = TRUE;
+ info->changed_relocs = TRUE;
+ }
+ else
+ all_optimized = FALSE;
+
+ /* Even if the target is not in range for a direct branch,
+ if we share a GP, we can eliminate the gp reload. */
+ if (optdest)
+ {
+ Elf_Internal_Rela *gpdisp
+ = (elf64_alpha_find_reloc_at_ofs
+ (info->relocs, irelend, urel->r_offset + 4,
+ R_ALPHA_GPDISP));
+ if (gpdisp)
+ {
+ bfd_byte *p_ldah = info->contents + gpdisp->r_offset;
+ bfd_byte *p_lda = p_ldah + gpdisp->r_addend;
+ unsigned int ldah = bfd_get_32 (info->abfd, p_ldah);
+ unsigned int lda = bfd_get_32 (info->abfd, p_lda);
+
+ /* Verify that the instruction is "ldah $29,0($26)".
+ Consider a function that ends in a noreturn call,
+ and that the next function begins with an ldgp,
+ and that by accident there is no padding between.
+ In that case the insn would use $27 as the base. */
+ if (ldah == 0x27ba0000 && lda == 0x23bd0000)
+ {
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, p_ldah);
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, p_lda);
+
+ gpdisp->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+ info->changed_contents = TRUE;
+ info->changed_relocs = TRUE;
+ }
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ /* If all cases were optimized, we can reduce the use count on this
+ got entry by one, possibly eliminating it. */
+ if (all_optimized)
+ {
+ if (--info->gotent->use_count == 0)
+ {
+ int sz = alpha_got_entry_size (R_ALPHA_LITERAL);
+ alpha_elf_tdata (info->gotobj)->total_got_size -= sz;
+ if (!info->h)
+ alpha_elf_tdata (info->gotobj)->local_got_size -= sz;
+ }
+
+ /* If the literal instruction is no longer needed (it may have been
+ reused), we can eliminate it. */
+ /* ??? For now, I don't want to deal with compacting the section,
+ so just nop it out. */
+ if (!lit_reused)
+ {
+ irel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+ info->changed_relocs = TRUE;
+
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP,
+ info->contents + irel->r_offset);
+ info->changed_contents = TRUE;
+ }
+
+ return TRUE;
+ }
+ else
+ return elf64_alpha_relax_got_load (info, symval, irel, R_ALPHA_LITERAL);
+}
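+
+/* An illustrative LITUSE_BASE rewrite from the function above, assuming
+ this is the only use and the combined displacement fits in 16 bits:
+
+ before: ldq $1,sym($gp) !literal!N
+ ldl $2,8($1) !lituse_base!N
+ after: unop (literal nopped out)
+ ldl $2,sym-gp+8($gp) !gprel16
+
+ The use insn keeps its opcode, Ra and offset but takes its base register
+ from the literal insn, so it no longer depends on the loaded address. */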
+
+static bfd_boolean
+elf64_alpha_relax_tls_get_addr (struct alpha_relax_info *info, bfd_vma symval,
+ Elf_Internal_Rela *irel, bfd_boolean is_gd)
+{
+ bfd_byte *pos[5];
+ unsigned int insn;
+ Elf_Internal_Rela *gpdisp, *hint;
+ bfd_boolean dynamic, use_gottprel, pos1_unusable;
+ unsigned long new_symndx;
+
+ dynamic = alpha_elf_dynamic_symbol_p (&info->h->root, info->link_info);
+
+ /* If a TLS symbol is accessed using IE at least once, there is no point
+ in using the dynamic model for it. */
+ if (is_gd && info->h && (info->h->flags & ALPHA_ELF_LINK_HASH_TLS_IE))
+ ;
+
+ /* If the symbol is local, and we've already committed to DF_STATIC_TLS,
+ then we might as well relax to IE. */
+ else if (info->link_info->shared && !dynamic
+ && (info->link_info->flags & DF_STATIC_TLS))
+ ;
+
+ /* Otherwise we must be building an executable to do anything. */
+ else if (info->link_info->shared)
+ return TRUE;
+
+ /* The TLSGD/TLSLDM relocation must be followed by a LITERAL and
+ the matching LITUSE_TLS relocations. */
+ if (irel + 2 >= info->relend)
+ return TRUE;
+ if (ELF64_R_TYPE (irel[1].r_info) != R_ALPHA_LITERAL
+ || ELF64_R_TYPE (irel[2].r_info) != R_ALPHA_LITUSE
+ || irel[2].r_addend != (is_gd ? LITUSE_ALPHA_TLSGD : LITUSE_ALPHA_TLSLDM))
+ return TRUE;
+
+ /* There must be a GPDISP relocation positioned immediately after the
+ LITUSE relocation. */
+ gpdisp = elf64_alpha_find_reloc_at_ofs (info->relocs, info->relend,
+ irel[2].r_offset + 4, R_ALPHA_GPDISP);
+ if (!gpdisp)
+ return TRUE;
+
+ pos[0] = info->contents + irel[0].r_offset;
+ pos[1] = info->contents + irel[1].r_offset;
+ pos[2] = info->contents + irel[2].r_offset;
+ pos[3] = info->contents + gpdisp->r_offset;
+ pos[4] = pos[3] + gpdisp->r_addend;
+ pos1_unusable = FALSE;
+
+ /* Generally, the positions are not allowed to be out of order, lest the
+ modified insn sequence have different register lifetimes. We can make
+ an exception when pos 1 is adjacent to pos 0. */
+ if (pos[1] + 4 == pos[0])
+ {
+ bfd_byte *tmp = pos[0];
+ pos[0] = pos[1];
+ pos[1] = tmp;
+ }
+ else if (pos[1] < pos[0])
+ pos1_unusable = TRUE;
+ if (pos[1] >= pos[2] || pos[2] >= pos[3])
+ return TRUE;
+
+ /* Reduce the use count on the LITERAL relocation. Do this before we
+ smash the symndx when we adjust the relocations below. */
+ {
+ struct alpha_elf_got_entry *lit_gotent;
+ struct alpha_elf_link_hash_entry *lit_h;
+ unsigned long indx;
+
+ BFD_ASSERT (ELF64_R_SYM (irel[1].r_info) >= info->symtab_hdr->sh_info);
+ indx = ELF64_R_SYM (irel[1].r_info) - info->symtab_hdr->sh_info;
+ lit_h = alpha_elf_sym_hashes (info->abfd)[indx];
+
+ while (lit_h->root.root.type == bfd_link_hash_indirect
+ || lit_h->root.root.type == bfd_link_hash_warning)
+ lit_h = (struct alpha_elf_link_hash_entry *) lit_h->root.root.u.i.link;
+
+ for (lit_gotent = lit_h->got_entries; lit_gotent ;
+ lit_gotent = lit_gotent->next)
+ if (lit_gotent->gotobj == info->gotobj
+ && lit_gotent->reloc_type == R_ALPHA_LITERAL
+ && lit_gotent->addend == irel[1].r_addend)
+ break;
+ BFD_ASSERT (lit_gotent);
+
+ if (--lit_gotent->use_count == 0)
+ {
+ int sz = alpha_got_entry_size (R_ALPHA_LITERAL);
+ alpha_elf_tdata (info->gotobj)->total_got_size -= sz;
+ }
+ }
+
+ /* Change
+
+ lda $16,x($gp) !tlsgd!1
+ ldq $27,__tls_get_addr($gp) !literal!1
+ jsr $26,($27),__tls_get_addr !lituse_tlsgd!1
+ ldah $29,0($26) !gpdisp!2
+ lda $29,0($29) !gpdisp!2
+ to
+ ldq $16,x($gp) !gottprel
+ unop
+ call_pal rduniq
+ addq $16,$0,$0
+ unop
+ or the first pair to
+ lda $16,x($gp) !tprel
+ unop
+ or
+ ldah $16,x($gp) !tprelhi
+ lda $16,x($16) !tprello
+
+ as appropriate. */
+
+ use_gottprel = FALSE;
+ new_symndx = is_gd ? ELF64_R_SYM (irel->r_info) : 0;
+ switch (!dynamic && !info->link_info->shared)
+ {
+ case 1:
+ {
+ bfd_vma tp_base;
+ bfd_signed_vma disp;
+
+ BFD_ASSERT (elf_hash_table (info->link_info)->tls_sec != NULL);
+ tp_base = alpha_get_tprel_base (info->link_info);
+ disp = symval - tp_base;
+
+ if (disp >= -0x8000 && disp < 0x8000)
+ {
+ insn = (OP_LDA << 26) | (16 << 21) | (31 << 16);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[1]);
+
+ irel[0].r_offset = pos[0] - info->contents;
+ irel[0].r_info = ELF64_R_INFO (new_symndx, R_ALPHA_TPREL16);
+ irel[1].r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+ break;
+ }
+ else if (disp >= -(bfd_signed_vma) 0x80000000
+ && disp < (bfd_signed_vma) 0x7fff8000
+ && !pos1_unusable)
+ {
+ insn = (OP_LDAH << 26) | (16 << 21) | (31 << 16);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
+ insn = (OP_LDA << 26) | (16 << 21) | (16 << 16);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[1]);
+
+ irel[0].r_offset = pos[0] - info->contents;
+ irel[0].r_info = ELF64_R_INFO (new_symndx, R_ALPHA_TPRELHI);
+ irel[1].r_offset = pos[1] - info->contents;
+ irel[1].r_info = ELF64_R_INFO (new_symndx, R_ALPHA_TPRELLO);
+ break;
+ }
+ }
+ /* FALLTHRU */
+
+ default:
+ use_gottprel = TRUE;
+
+ insn = (OP_LDQ << 26) | (16 << 21) | (29 << 16);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[0]);
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[1]);
+
+ irel[0].r_offset = pos[0] - info->contents;
+ irel[0].r_info = ELF64_R_INFO (new_symndx, R_ALPHA_GOTTPREL);
+ irel[1].r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+ break;
+ }
+
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_RDUNIQ, pos[2]);
+
+ insn = INSN_ADDQ | (16 << 21) | (0 << 16) | (0 << 0);
+ bfd_put_32 (info->abfd, (bfd_vma) insn, pos[3]);
+
+ bfd_put_32 (info->abfd, (bfd_vma) INSN_UNOP, pos[4]);
+
+ irel[2].r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+ gpdisp->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+
+ hint = elf64_alpha_find_reloc_at_ofs (info->relocs, info->relend,
+ irel[2].r_offset, R_ALPHA_HINT);
+ if (hint)
+ hint->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
+
+ info->changed_contents = TRUE;
+ info->changed_relocs = TRUE;
+
+ /* Reduce the use count on the TLSGD/TLSLDM relocation. */
+ if (--info->gotent->use_count == 0)
+ {
+ int sz = alpha_got_entry_size (info->gotent->reloc_type);
+ alpha_elf_tdata (info->gotobj)->total_got_size -= sz;
+ if (!info->h)
+ alpha_elf_tdata (info->gotobj)->local_got_size -= sz;
+ }
+
+ /* If we've switched to a GOTTPREL relocation, increment the reference
+ count on that got entry. */
+ if (use_gottprel)
+ {
+ struct alpha_elf_got_entry *tprel_gotent;
+
+ for (tprel_gotent = *info->first_gotent; tprel_gotent ;
+ tprel_gotent = tprel_gotent->next)
+ if (tprel_gotent->gotobj == info->gotobj
+ && tprel_gotent->reloc_type == R_ALPHA_GOTTPREL
+ && tprel_gotent->addend == irel->r_addend)
+ break;
+ if (tprel_gotent)
+ tprel_gotent->use_count++;
+ else
+ {
+ if (info->gotent->use_count == 0)
+ tprel_gotent = info->gotent;
+ else
+ {
+ tprel_gotent = (struct alpha_elf_got_entry *)
+ bfd_alloc (info->abfd, sizeof (struct alpha_elf_got_entry));
+ if (!tprel_gotent)
+ return FALSE;
+
+ tprel_gotent->next = *info->first_gotent;
+ *info->first_gotent = tprel_gotent;
+
+ tprel_gotent->gotobj = info->gotobj;
+ tprel_gotent->addend = irel->r_addend;
+ tprel_gotent->got_offset = -1;
+ tprel_gotent->reloc_done = 0;
+ tprel_gotent->reloc_xlated = 0;
+ }
+
+ tprel_gotent->use_count = 1;
+ tprel_gotent->reloc_type = R_ALPHA_GOTTPREL;
+ }
+ }
+
+ return TRUE;
+}
+
+static bfd_boolean
+elf64_alpha_relax_section (bfd *abfd, asection *sec,
+ struct bfd_link_info *link_info, bfd_boolean *again)
+{
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Rela *internal_relocs;
+ Elf_Internal_Rela *irel, *irelend;
+ Elf_Internal_Sym *isymbuf = NULL;
+ struct alpha_elf_got_entry **local_got_entries;
+ struct alpha_relax_info info;
+
+ /* We are not currently changing any sizes, so only one pass. */
+ *again = FALSE;
+
+ if (link_info->relocatable
+ || ((sec->flags & (SEC_CODE | SEC_RELOC | SEC_ALLOC))
+ != (SEC_CODE | SEC_RELOC | SEC_ALLOC))
+ || sec->reloc_count == 0)
+ return TRUE;
+
+ symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
+ local_got_entries = alpha_elf_tdata(abfd)->local_got_entries;
+
+ /* Load the relocations for this section. */
+ internal_relocs = (_bfd_elf_link_read_relocs
+ (abfd, sec, (PTR) NULL, (Elf_Internal_Rela *) NULL,
+ link_info->keep_memory));
+ if (internal_relocs == NULL)
+ return FALSE;
+
+ memset(&info, 0, sizeof (info));
+ info.abfd = abfd;
+ info.sec = sec;
+ info.link_info = link_info;
+ info.symtab_hdr = symtab_hdr;
+ info.relocs = internal_relocs;
+ info.relend = irelend = internal_relocs + sec->reloc_count;
+
+ /* Find the GP for this object. Do not store the result back via
+ _bfd_set_gp_value, since this could change again before final. */
+ info.gotobj = alpha_elf_tdata (abfd)->gotobj;
+ if (info.gotobj)
+ {
+ asection *sgot = alpha_elf_tdata (info.gotobj)->got;
+ info.gp = (sgot->output_section->vma
+ + sgot->output_offset
+ + 0x8000);
+ }
+
+ /* Get the section contents. */
+ if (elf_section_data (sec)->this_hdr.contents != NULL)
+ info.contents = elf_section_data (sec)->this_hdr.contents;
+ else
+ {
+ if (!bfd_malloc_and_get_section (abfd, sec, &info.contents))
+ goto error_return;
+ }
+
+ for (irel = internal_relocs; irel < irelend; irel++)
+ {
+ bfd_vma symval;
+ struct alpha_elf_got_entry *gotent;
+ unsigned long r_type = ELF64_R_TYPE (irel->r_info);
+ unsigned long r_symndx = ELF64_R_SYM (irel->r_info);
+
+ /* Early exit for unhandled or unrelaxable relocations. */
+ switch (r_type)
+ {
+ case R_ALPHA_LITERAL:
+ case R_ALPHA_GPRELHIGH:
+ case R_ALPHA_GPRELLOW:
+ case R_ALPHA_GOTDTPREL:
+ case R_ALPHA_GOTTPREL:
+ case R_ALPHA_TLSGD:
+ break;
+
+ case R_ALPHA_TLSLDM:
+ /* The symbol for a TLSLDM reloc is ignored. Collapse the
+ reloc to the 0 symbol so that they all match. */
+ r_symndx = 0;
+ break;
+
+ default:
+ continue;
+ }
+
+ /* Get the value of the symbol referred to by the reloc. */
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ /* A local symbol. */
+ Elf_Internal_Sym *isym;
+
+ /* Read this BFD's local symbols. */
+ if (isymbuf == NULL)
+ {
+ isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
+ if (isymbuf == NULL)
+ isymbuf = bfd_elf_get_elf_syms (abfd, symtab_hdr,
+ symtab_hdr->sh_info, 0,
+ NULL, NULL, NULL);
+ if (isymbuf == NULL)
+ goto error_return;
+ }
+
+ isym = isymbuf + r_symndx;
+
+ /* Given the symbol for a TLSLDM reloc is ignored, this also
+ means forcing the symbol value to the tp base. */
+ if (r_type == R_ALPHA_TLSLDM)
+ {
+ info.tsec = bfd_abs_section_ptr;
+ symval = alpha_get_tprel_base (info.link_info);
+ }
+ else
+ {
+ symval = isym->st_value;
+ if (isym->st_shndx == SHN_UNDEF)
+ continue;
+ else if (isym->st_shndx == SHN_ABS)
+ info.tsec = bfd_abs_section_ptr;
+ else if (isym->st_shndx == SHN_COMMON)
+ info.tsec = bfd_com_section_ptr;
+ else
+ info.tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
+ }
+
+ info.h = NULL;
+ info.other = isym->st_other;
+ if (local_got_entries)
+ info.first_gotent = &local_got_entries[r_symndx];
+ else
+ {
+ info.first_gotent = &info.gotent;
+ info.gotent = NULL;
+ }
+ }
+ else
+ {
+ unsigned long indx;
+ struct alpha_elf_link_hash_entry *h;
+
+ indx = r_symndx - symtab_hdr->sh_info;
+ h = alpha_elf_sym_hashes (abfd)[indx];
+ BFD_ASSERT (h != NULL);
+
+ while (h->root.root.type == bfd_link_hash_indirect
+ || h->root.root.type == bfd_link_hash_warning)
+ h = (struct alpha_elf_link_hash_entry *)h->root.root.u.i.link;
+
+ /* If the symbol is undefined, we can't do anything with it. */
+ if (h->root.root.type == bfd_link_hash_undefined)
+ continue;
+
+ /* If the symbol isn't defined in the current module,
+ again we can't do anything. */
+ if (h->root.root.type == bfd_link_hash_undefweak)
+ {
+ info.tsec = bfd_abs_section_ptr;
+ symval = 0;
+ }
+ else if (!h->root.def_regular)
+ {
+ /* Except for TLSGD relocs, which can sometimes be
+ relaxed to GOTTPREL relocs. */
+ if (r_type != R_ALPHA_TLSGD)
+ continue;
+ info.tsec = bfd_abs_section_ptr;
+ symval = 0;
+ }
+ else
+ {
+ info.tsec = h->root.root.u.def.section;
+ symval = h->root.root.u.def.value;
+ }
+
+ info.h = h;
+ info.other = h->root.other;
+ info.first_gotent = &h->got_entries;
+ }
+
+ /* Search for the got entry to be used by this relocation. */
+ for (gotent = *info.first_gotent; gotent ; gotent = gotent->next)
+ if (gotent->gotobj == info.gotobj
+ && gotent->reloc_type == r_type
+ && gotent->addend == irel->r_addend)
+ break;
+ info.gotent = gotent;
+
+ symval += info.tsec->output_section->vma + info.tsec->output_offset;
+ symval += irel->r_addend;
+
+ switch (r_type)
+ {
+ case R_ALPHA_LITERAL:
+ BFD_ASSERT(info.gotent != NULL);
+
+ /* If there exist LITUSE relocations immediately following, this
+ opens up all sorts of interesting optimizations, because we
+ now know every location where this address load is used. */
+ if (irel+1 < irelend
+ && ELF64_R_TYPE (irel[1].r_info) == R_ALPHA_LITUSE)
+ {
+ if (!elf64_alpha_relax_with_lituse (&info, symval, irel))
+ goto error_return;
+ }
+ else
+ {
+ if (!elf64_alpha_relax_got_load (&info, symval, irel, r_type))
+ goto error_return;
+ }
+ break;
+ case R_ALPHA_GOTDTPREL:
+ case R_ALPHA_GOTTPREL:
+ BFD_ASSERT(info.gotent != NULL);
+ if (!elf64_alpha_relax_got_load (&info, symval, irel, r_type))
+ goto error_return;
+ break;
+
+ case R_ALPHA_TLSGD:
+ case R_ALPHA_TLSLDM:
+ BFD_ASSERT(info.gotent != NULL);
+ if (!elf64_alpha_relax_tls_get_addr (&info, symval, irel,
+ r_type == R_ALPHA_TLSGD))
+ goto error_return;
+ break;
+ }
+ }
+
+ if (!elf64_alpha_size_plt_section (link_info))
+ return FALSE;
+ if (!elf64_alpha_size_got_sections (link_info))
+ return FALSE;
+ if (!elf64_alpha_size_rela_got_section (link_info))
+ return FALSE;
+
+ if (isymbuf != NULL
+ && symtab_hdr->contents != (unsigned char *) isymbuf)
+ {
+ if (!link_info->keep_memory)
+ free (isymbuf);
+ else
+ {
+ /* Cache the symbols for elf_link_input_bfd. */
+ symtab_hdr->contents = (unsigned char *) isymbuf;
+ }
+ }
+
+ if (info.contents != NULL
+ && elf_section_data (sec)->this_hdr.contents != info.contents)
+ {
+ if (!info.changed_contents && !link_info->keep_memory)
+ free (info.contents);
+ else
+ {
+ /* Cache the section contents for elf_link_input_bfd. */
+ elf_section_data (sec)->this_hdr.contents = info.contents;
+ }
+ }
+
+ if (elf_section_data (sec)->relocs != internal_relocs)
+ {
+ if (!info.changed_relocs)
+ free (internal_relocs);
+ else
+ elf_section_data (sec)->relocs = internal_relocs;
+ }
+
+ *again = info.changed_contents || info.changed_relocs;
+
+ return TRUE;
+
+ error_return:
+ if (isymbuf != NULL
+ && symtab_hdr->contents != (unsigned char *) isymbuf)
+ free (isymbuf);
+ if (info.contents != NULL
+ && elf_section_data (sec)->this_hdr.contents != info.contents)
+ free (info.contents);
+ if (internal_relocs != NULL
+ && elf_section_data (sec)->relocs != internal_relocs)
+ free (internal_relocs);
+ return FALSE;
+}
+
/* Emit a dynamic relocation for (DYNINDX, RTYPE, ADDEND) at (SEC, OFFSET)
into the next available slot in SREL. */
static void
-elf64_alpha_emit_dynrel (abfd, info, sec, srel, offset, dynindx, rtype, addend)
- bfd *abfd;
- struct bfd_link_info *info;
- asection *sec, *srel;
- bfd_vma offset, addend;
- long dynindx, rtype;
+elf64_alpha_emit_dynrel (bfd *abfd, struct bfd_link_info *info,
+ asection *sec, asection *srel, bfd_vma offset,
+ long dynindx, long rtype, bfd_vma addend)
{
Elf_Internal_Rela outrel;
bfd_byte *loc;
@@ -4200,16 +3927,13 @@ elf64_alpha_emit_dynrel (abfd, info, sec, srel, offset, dynindx, rtype, addend)
symbol winds up in the output section. */
static bfd_boolean
-elf64_alpha_relocate_section_r (output_bfd, info, input_bfd, input_section,
- contents, relocs, local_syms, local_sections)
- bfd *output_bfd ATTRIBUTE_UNUSED;
- struct bfd_link_info *info ATTRIBUTE_UNUSED;
- bfd *input_bfd;
- asection *input_section;
- bfd_byte *contents ATTRIBUTE_UNUSED;
- Elf_Internal_Rela *relocs;
- Elf_Internal_Sym *local_syms;
- asection **local_sections;
+elf64_alpha_relocate_section_r (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ bfd *input_bfd, asection *input_section,
+ bfd_byte *contents ATTRIBUTE_UNUSED,
+ Elf_Internal_Rela *relocs,
+ Elf_Internal_Sym *local_syms,
+ asection **local_sections)
{
unsigned long symtab_hdr_sh_info;
Elf_Internal_Rela *rel;
@@ -4261,16 +3985,11 @@ elf64_alpha_relocate_section_r (output_bfd, info, input_bfd, input_section,
/* Relocate an Alpha ELF section. */
static bfd_boolean
-elf64_alpha_relocate_section (output_bfd, info, input_bfd, input_section,
- contents, relocs, local_syms, local_sections)
- bfd *output_bfd;
- struct bfd_link_info *info;
- bfd *input_bfd;
- asection *input_section;
- bfd_byte *contents;
- Elf_Internal_Rela *relocs;
- Elf_Internal_Sym *local_syms;
- asection **local_sections;
+elf64_alpha_relocate_section (bfd *output_bfd, struct bfd_link_info *info,
+ bfd *input_bfd, asection *input_section,
+ bfd_byte *contents, Elf_Internal_Rela *relocs,
+ Elf_Internal_Sym *local_syms,
+ asection **local_sections)
{
Elf_Internal_Shdr *symtab_hdr;
Elf_Internal_Rela *rel;
@@ -4902,11 +4621,9 @@ elf64_alpha_relocate_section (output_bfd, info, input_bfd, input_section,
dynamic sections here. */
static bfd_boolean
-elf64_alpha_finish_dynamic_symbol (output_bfd, info, h, sym)
- bfd *output_bfd;
- struct bfd_link_info *info;
- struct elf_link_hash_entry *h;
- Elf_Internal_Sym *sym;
+elf64_alpha_finish_dynamic_symbol (bfd *output_bfd, struct bfd_link_info *info,
+ struct elf_link_hash_entry *h,
+ Elf_Internal_Sym *sym)
{
bfd *dynobj = elf_hash_table(info)->dynobj;
@@ -5064,9 +4781,8 @@ elf64_alpha_finish_dynamic_symbol (output_bfd, info, h, sym)
/* Finish up the dynamic sections. */
static bfd_boolean
-elf64_alpha_finish_dynamic_sections (output_bfd, info)
- bfd *output_bfd;
- struct bfd_link_info *info;
+elf64_alpha_finish_dynamic_sections (bfd *output_bfd,
+ struct bfd_link_info *info)
{
bfd *dynobj;
asection *sdyn;
@@ -5153,9 +4869,7 @@ elf64_alpha_finish_dynamic_sections (output_bfd, info)
them all out sequentially. */
static bfd_boolean
-elf64_alpha_final_link (abfd, info)
- bfd *abfd;
- struct bfd_link_info *info;
+elf64_alpha_final_link (bfd *abfd, struct bfd_link_info *info)
{
asection *o;
struct bfd_link_order *p;
@@ -5422,8 +5136,7 @@ elf64_alpha_final_link (abfd, info)
}
static enum elf_reloc_type_class
-elf64_alpha_reloc_type_class (rela)
- const Elf_Internal_Rela *rela;
+elf64_alpha_reloc_type_class (const Elf_Internal_Rela *rela)
{
switch ((int) ELF64_R_TYPE (rela->r_info))
{
@@ -5638,13 +5351,9 @@ static const struct elf_size_info alpha_elf_size_info =
"FreeBSD" label in the ELF header. So we put this label on all
executables and (for simplicity) also all other object files. */
-static void elf64_alpha_fbsd_post_process_headers
- PARAMS ((bfd *, struct bfd_link_info *));
-
static void
-elf64_alpha_fbsd_post_process_headers (abfd, link_info)
- bfd * abfd;
- struct bfd_link_info * link_info ATTRIBUTE_UNUSED;
+elf64_alpha_fbsd_post_process_headers (bfd * abfd,
+ struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
{
Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */