path: root/bfd
authorJiong Wang <jiong.wang@arm.com>2014-10-24 11:39:35 +0100
committerJiong Wang <jiong.wang@arm.com>2014-10-24 11:39:35 +0100
commit68fcca92b735bb46e38331485ac2e933e5876b83 (patch)
tree586a3f1bf8c983437c2f084e078cd6402a930e7c /bfd
parent79ccd89e582a1159a503813be020d044e070d53f (diff)
[AArch64] Cortex-A53 erratum 835769 linker workaround
2014-10-22  Tejas Belagod  <tejas.belagod@arm.com>

bfd/
* bfd-in.h (bfd_elf64_aarch64_set_options): Add a parameter.
* bfd-in2.h (bfd_elf64_aarch64_set_options): Likewise.
* elfnn-aarch64.c (aarch64_erratum_835769_stub): New.
(elf_aarch64_stub_type): Add new type aarch64_stub_erratum_835769_veneer.
(elf_aarch64_stub_hash_entry): New fields for erratum 835769.
(aarch64_erratum_835769_fix): New data struct to record erratum 835769.
(elf_aarch64_link_hash_table): Global flags for 835769.
(aarch64_build_one_stub): Add case for 835769.
(aarch64_size_one_stub): Likewise.
(aarch64_mem_op_p, aarch64_mlxl_p, aarch64_erratum_sequence,
erratum_835769_scan): New.  Decode and scan functions for erratum 835769.
(elf_aarch64_create_or_find_stub_sec): New.
(elfNN_aarch64_size_stubs): Look for erratum 835769 and record them.
(bfd_elfNN_aarch64_set_options): Set global flag for 835769.
(erratum_835769_branch_to_stub_data,
make_branch_to_erratum_835769_stub): New.  Connect up all the erratum
stubs to occurrences by branches.
(elfNN_aarch64_write_section): New hook.
(aarch64_map_one_stub): Output erratum stub symbol.
(elfNN_aarch64_size_dynamic_sections): Init mapping symbol information
for erratum 835769.
(elf_backend_write_section): Define.

ld/
* emultempl/aarch64elf.em: Add command-line option for erratum 835769.

ld/testsuite/
* ld-aarch64/aarch64-elf.exp (aarch64elftests): Drive erratum 835769 tests.
* ld-aarch64/erratum835769.d: New.
* ld-aarch64/erratum835769.s: New.
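The shape of the fix can be pictured on a hypothetical input sequence (the registers and label below are illustrative only, not taken from the patch or its testsuite): the linker copies the affected multiply-accumulate into a veneer in the stub section and overwrites the original instruction with a branch to that veneer, which then branches back.

/* Illustrative sketch only -- not part of the patch.

   Before patching:                 After patching:

       ldr   x2, [x3]                   ldr   x2, [x3]
       madd  x0, x1, x4, x5             b     __erratum_835769_veneer_0
       <next insn>                      <next insn>

                                    __erratum_835769_veneer_0:   ; in <sec>.stub
                                        madd  x0, x1, x4, x5     ; veneered insn
                                        b     <next insn>        ; branch back  */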
Diffstat (limited to 'bfd')
-rw-r--r--  bfd/ChangeLog          29
-rw-r--r--  bfd/bfd-in.h            4
-rw-r--r--  bfd/bfd-in2.h           4
-rw-r--r--  bfd/elfnn-aarch64.c   641
4 files changed, 671 insertions, 7 deletions
diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index 79bc784..04b6d86 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,3 +1,32 @@
+2014-10-24 Tejas Belagod <tejas.belagod@arm.com>
+
+ * bfd-in.h (bfd_elf64_aarch64_set_options): Add a parameter.
+ * bfd-in2.h (bfd_elf64_aarch64_set_options): Likewise.
+ * elfnn-aarch64.c (aarch64_erratum_835769_stub): New.
+ (elf_aarch64_stub_type): Add new type
+ aarch64_stub_erratum_835769_veneer.
+ (elf_aarch64_stub_hash_entry): New fields for erratum 835769.
+ (aarch64_erratum_835769_fix): New data struct to record erratum
+ 835769.
+ (elf_aarch64_link_hash_table): Global flags for 835769.
+ (aarch64_build_one_stub): Add case for 835769.
+ (aarch64_size_one_stub): Likewise.
+ (aarch64_mem_op_p, aarch64_mlxl_p,
+ aarch64_erratum_sequence, erratum_835769_scan):
+ New. Decode and scan functions for erratum 835769.
+ (elf_aarch64_create_or_find_stub_sec): New.
+ (elfNN_aarch64_size_stubs): Look for erratum 835769 and record
+ them.
+ (bfd_elfNN_aarch64_set_options): Set global flag for 835769.
+ (erratum_835769_branch_to_stub_data,
+ make_branch_to_erratum_835769_stub): New.  Connect up all the
+ erratum stubs to occurrences by branches.
+ (elfNN_aarch64_write_section): New hook.
+ (aarch64_map_one_stub): Output erratum stub symbol.
+ (elfNN_aarch64_size_dynamic_sections): Init mapping symbol
+ information for erratum 835769.
+ (elf_backend_write_section): Define.
+
2014-10-23 Victor Kamensky <victor.kamensky@linaro.org>
* elf32-arm.c (read_code32): New function to read 32 bit
diff --git a/bfd/bfd-in.h b/bfd/bfd-in.h
index bcb9fdc..1f80a76 100644
--- a/bfd/bfd-in.h
+++ b/bfd/bfd-in.h
@@ -937,10 +937,10 @@ extern void bfd_elf32_aarch64_init_maps
(bfd *);
extern void bfd_elf64_aarch64_set_options
- (bfd *, struct bfd_link_info *, int, int, int);
+ (bfd *, struct bfd_link_info *, int, int, int, int);
extern void bfd_elf32_aarch64_set_options
- (bfd *, struct bfd_link_info *, int, int, int);
+ (bfd *, struct bfd_link_info *, int, int, int, int);
/* ELF AArch64 mapping symbol support. */
#define BFD_AARCH64_SPECIAL_SYM_TYPE_MAP (1 << 0)
diff --git a/bfd/bfd-in2.h b/bfd/bfd-in2.h
index 928aa35..c7a2bb5 100644
--- a/bfd/bfd-in2.h
+++ b/bfd/bfd-in2.h
@@ -944,10 +944,10 @@ extern void bfd_elf32_aarch64_init_maps
(bfd *);
extern void bfd_elf64_aarch64_set_options
- (bfd *, struct bfd_link_info *, int, int, int);
+ (bfd *, struct bfd_link_info *, int, int, int, int);
extern void bfd_elf32_aarch64_set_options
- (bfd *, struct bfd_link_info *, int, int, int);
+ (bfd *, struct bfd_link_info *, int, int, int, int);
/* ELF AArch64 mapping symbol support. */
#define BFD_AARCH64_SPECIAL_SYM_TYPE_MAP (1 << 0)
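For reference, this is the shape a caller of the widened prototype now takes. The real caller lives in ld/emultempl/aarch64elf.em and is not part of this diff, so the wrapper and variable names below are assumptions made purely for illustration; only the extra final argument is new.

#include "bfd.h"
#include "bfdlink.h"

/* Hypothetical wrapper -- names are illustrative, not from the patch.  */
static void
setup_aarch64_options (bfd *output_bfd, struct bfd_link_info *link_info,
                       int fix_erratum_835769)
{
  int no_enum_size_warning = 0;
  int no_wchar_size_warning = 0;
  int pic_veneer = 0;

  /* The sixth argument is the new erratum 835769 switch.  */
  bfd_elf64_aarch64_set_options (output_bfd, link_info,
                                 no_enum_size_warning,
                                 no_wchar_size_warning,
                                 pic_veneer,
                                 fix_erratum_835769);
}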
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index 9e00069..19ad4d2 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -1611,6 +1611,12 @@ static const uint32_t aarch64_long_branch_stub[] =
0x00000000,
};
+static const uint32_t aarch64_erratum_835769_stub[] =
+{
+ 0x00000000, /* Placeholder for multiply accumulate. */
+ 0x14000000, /* b <label> */
+};
+
/* Section name for stubs is the associated section name plus this
string. */
#define STUB_SUFFIX ".stub"
@@ -1620,6 +1626,7 @@ enum elf_aarch64_stub_type
aarch64_stub_none,
aarch64_stub_adrp_branch,
aarch64_stub_long_branch,
+ aarch64_stub_erratum_835769_veneer,
};
struct elf_aarch64_stub_hash_entry
@@ -1654,6 +1661,10 @@ struct elf_aarch64_stub_hash_entry
stub name in the hash table has to be unique; this does not, so
it can be friendlier. */
char *output_name;
+
+ /* The instruction which caused this stub to be generated (only valid for
+ erratum 835769 workaround stubs at present). */
+ uint32_t veneered_insn;
};
/* Used to build a map of a section. This is required for mixed-endian
@@ -1679,6 +1690,17 @@ _aarch64_elf_section_data;
#define elf_aarch64_section_data(sec) \
((_aarch64_elf_section_data *) elf_section_data (sec))
+/* A fix-descriptor for erratum 835769. */
+struct aarch64_erratum_835769_fix
+{
+ bfd *input_bfd;
+ asection *section;
+ bfd_vma offset;
+ uint32_t veneered_insn;
+ char *stub_name;
+ enum elf_aarch64_stub_type stub_type;
+};
+
/* The size of the thread control block which is defined to be two pointers. */
#define TCB_SIZE (ARCH_SIZE/8)*2
@@ -1799,6 +1821,15 @@ struct elf_aarch64_link_hash_table
/* Nonzero to force PIC branch veneers. */
int pic_veneer;
+ /* Fix erratum 835769. */
+ int fix_erratum_835769;
+
+ /* A table of fix locations for erratum 835769. This holds erratum
+ fix locations between elfNN_aarch64_size_stubs() and
+ elfNN_aarch64_write_section(). */
+ struct aarch64_erratum_835769_fix *aarch64_erratum_835769_fixes;
+ unsigned int num_aarch64_erratum_835769_fixes;
+
/* The number of bytes in the initial entry in the PLT. */
bfd_size_type plt_header_size;
@@ -2343,6 +2374,9 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
bfd *stub_bfd;
bfd_byte *loc;
bfd_vma sym_value;
+ bfd_vma veneered_insn_loc;
+ bfd_vma veneer_entry_loc;
+ bfd_signed_vma branch_offset = 0;
unsigned int template_size;
const uint32_t *template;
unsigned int i;
@@ -2383,6 +2417,10 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
template = aarch64_long_branch_stub;
template_size = sizeof (aarch64_long_branch_stub);
break;
+ case aarch64_stub_erratum_835769_veneer:
+ template = aarch64_erratum_835769_stub;
+ template_size = sizeof (aarch64_erratum_835769_stub);
+ break;
default:
BFD_FAIL ();
return FALSE;
@@ -2425,6 +2463,23 @@ aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
stub_entry->stub_offset + 16,
sym_value + 12, 0);
break;
+
+ case aarch64_stub_erratum_835769_veneer:
+ veneered_insn_loc = stub_entry->target_section->output_section->vma
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_value;
+ veneer_entry_loc = stub_entry->stub_sec->output_section->vma
+ + stub_entry->stub_sec->output_offset
+ + stub_entry->stub_offset;
+ branch_offset = veneered_insn_loc - veneer_entry_loc;
+ branch_offset >>= 2;
+ branch_offset &= 0x3ffffff;
+ bfd_putl32 (stub_entry->veneered_insn,
+ stub_sec->contents + stub_entry->stub_offset);
+ bfd_putl32 (template[1] | branch_offset,
+ stub_sec->contents + stub_entry->stub_offset + 4);
+ break;
+
default:
break;
}
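The backward branch written into the second stub word deserves a note: branch_offset is computed from the start of the stub, yet the branch itself sits four bytes in, and its target is the instruction after the original multiply-accumulate, also four bytes on, so the two displacements cancel and the expression above is exact. A minimal sketch of the B-immediate encoding involved (the helper name is mine, not from the patch):

#include <stdint.h>

/* Encode "B <to>" for a branch placed at FROM.  imm26 holds the signed
   word offset; the unsigned wrap-around of the subtraction yields the
   same low 26 bits as the mask used above.  */
static uint32_t
encode_b (uint64_t from, uint64_t to)
{
  return 0x14000000 | (uint32_t) (((to - from) >> 2) & 0x3ffffff);
}

/* The stub's branch lives at veneer_entry_loc + 4 and must reach
   veneered_insn_loc + 4 (the instruction after the copied madd), so
   encode_b (veneer + 4, insn + 4) == encode_b (veneer, insn), which is
   what the code above computes directly.  */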
@@ -2453,6 +2508,9 @@ aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
case aarch64_stub_long_branch:
size = sizeof (aarch64_long_branch_stub);
break;
+ case aarch64_stub_erratum_835769_veneer:
+ size = sizeof (aarch64_erratum_835769_stub);
+ break;
default:
BFD_FAIL ();
return FALSE;
@@ -2641,6 +2699,388 @@ group_sections (struct elf_aarch64_link_hash_table *htab,
#undef PREV_SEC
+#define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
+
+#define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
+#define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
+#define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
+#define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
+#define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
+#define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
+
+#define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
+#define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
+#define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
+#define AARCH64_ZR 0x1f
+
+/* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
+ LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
+
+#define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
+#define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
+#define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
+#define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
+#define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
+#define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
+#define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
+#define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
+#define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
+#define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
+#define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
+#define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
+#define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
+#define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
+#define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
+#define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
+#define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
+#define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
+
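As a concrete check of the decode macros above, the following self-contained snippet applies a subset of them to two hand-assembled encodings. The encodings were written for this note and do not appear in the patch or its testsuite.

#include <assert.h>
#include <stdint.h>

/* Copies of the macros above, repeated so the example stands alone.  */
#define AARCH64_BITS(x, pos, n)  (((x) >> (pos)) & ((1 << (n)) - 1))
#define AARCH64_RT(insn)         AARCH64_BITS (insn, 0, 5)
#define AARCH64_RT2(insn)        AARCH64_BITS (insn, 10, 5)
#define AARCH64_RA(insn)         AARCH64_BITS (insn, 10, 5)
#define AARCH64_BIT(insn, n)     AARCH64_BITS (insn, n, 1)
#define AARCH64_MAC(insn)        (((insn) & 0xff000000) == 0x9b000000)
#define AARCH64_LDST(insn)       (((insn) & 0x0a000000) == 0x08000000)
#define AARCH64_LDSTP_O(insn)    (((insn) & 0x3b800000) == 0x29000000)

int
main (void)
{
  uint32_t ldp  = 0xa9400861;   /* ldp  x1, x2, [x3]    (hand-assembled) */
  uint32_t madd = 0x9b041420;   /* madd x0, x1, x4, x5  (hand-assembled) */

  /* The LDP falls in the load/store encoding space, is a pair access
     with an offset addressing mode, loads (L bit set) and transfers
     x1 and x2.  */
  assert (AARCH64_LDST (ldp));
  assert (AARCH64_LDSTP_O (ldp));
  assert (AARCH64_BIT (ldp, 22) == 1);
  assert (AARCH64_RT (ldp) == 1 && AARCH64_RT2 (ldp) == 2);

  /* The MADD sits in the multiply-accumulate group and its accumulator
     (Ra) is x5, not XZR, so aarch64_mlxl_p () below would accept it.  */
  assert (AARCH64_MAC (madd));
  assert (AARCH64_RA (madd) == 5);
  return 0;
}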
+/* Classify INSN as a load/store.  If it is one, return TRUE and set *RT
+ and *RTN to the transfer registers, *PAIR to whether it is a register
+ pair access and *LOAD to whether it is a load.  Return FALSE otherwise. */
+
+static bfd_boolean
+aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn,
+ bfd_boolean *pair, bfd_boolean *load)
+{
+ uint32_t opcode;
+ unsigned int r;
+ uint32_t opc = 0;
+ uint32_t v = 0;
+ uint32_t opc_v = 0;
+
+ /* Bail out quickly if INSN doesn't fall into the load-store
+ encoding space. */
+ if (!AARCH64_LDST (insn))
+ return FALSE;
+
+ *pair = FALSE;
+ *load = FALSE;
+ if (AARCH64_LDST_EX (insn))
+ {
+ *rt = AARCH64_RT (insn);
+ *rtn = *rt;
+ if (AARCH64_BIT (insn, 21) == 1)
+ {
+ *pair = TRUE;
+ *rtn = AARCH64_RT2 (insn);
+ }
+ *load = AARCH64_LD (insn);
+ return TRUE;
+ }
+ else if (AARCH64_LDST_NAP (insn)
+ || AARCH64_LDSTP_PI (insn)
+ || AARCH64_LDSTP_O (insn)
+ || AARCH64_LDSTP_PRE (insn))
+ {
+ *pair = TRUE;
+ *rt = AARCH64_RT (insn);
+ *rtn = AARCH64_RT2 (insn);
+ *load = AARCH64_LD (insn);
+ return TRUE;
+ }
+ else if (AARCH64_LDST_PCREL (insn)
+ || AARCH64_LDST_UI (insn)
+ || AARCH64_LDST_PIIMM (insn)
+ || AARCH64_LDST_U (insn)
+ || AARCH64_LDST_PREIMM (insn)
+ || AARCH64_LDST_RO (insn)
+ || AARCH64_LDST_UIMM (insn))
+ {
+ *rt = AARCH64_RT (insn);
+ *rtn = *rt;
+ if (AARCH64_LDST_PCREL (insn))
+ *load = TRUE;
+ opc = AARCH64_BITS (insn, 22, 2);
+ v = AARCH64_BIT (insn, 26);
+ opc_v = opc | (v << 2);
+ *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
+ || opc_v == 5 || opc_v == 7);
+ return TRUE;
+ }
+ else if (AARCH64_LDST_SIMD_M (insn)
+ || AARCH64_LDST_SIMD_M_PI (insn))
+ {
+ *rt = AARCH64_RT (insn);
+ *load = AARCH64_BIT (insn, 22);
+ opcode = (insn >> 12) & 0xf;
+ switch (opcode)
+ {
+ case 0:
+ case 2:
+ *rtn = *rt + 3;
+ break;
+
+ case 4:
+ case 6:
+ *rtn = *rt + 2;
+ break;
+
+ case 7:
+ *rtn = *rt;
+ break;
+
+ case 8:
+ case 10:
+ *rtn = *rt + 1;
+ break;
+
+ default:
+ return FALSE;
+ }
+ return TRUE;
+ }
+ else if (AARCH64_LDST_SIMD_S (insn)
+ || AARCH64_LDST_SIMD_S_PI (insn))
+ {
+ *rt = AARCH64_RT (insn);
+ r = (insn >> 21) & 1;
+ *load = AARCH64_BIT (insn, 22);
+ opcode = (insn >> 13) & 0x7;
+ switch (opcode)
+ {
+ case 0:
+ case 2:
+ case 4:
+ *rtn = *rt + r;
+ break;
+
+ case 1:
+ case 3:
+ case 5:
+ *rtn = *rt + (r == 0 ? 2 : 3);
+ break;
+
+ case 6:
+ *rtn = *rt + r;
+ break;
+
+ case 7:
+ *rtn = *rt + (r == 0 ? 2 : 3);
+ break;
+
+ default:
+ return FALSE;
+ }
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/* Return TRUE if INSN is multiply-accumulate. */
+
+static bfd_boolean
+aarch64_mlxl_p (uint32_t insn)
+{
+ uint32_t op31 = AARCH64_OP31 (insn);
+
+ if (AARCH64_MAC (insn)
+ && (op31 == 0 || op31 == 1 || op31 == 5)
+ /* Exclude MUL instructions which are encoded as a multiply-accumulate
+ with RA = XZR. */
+ && AARCH64_RA (insn) != AARCH64_ZR)
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
+ it is possible for a 64-bit multiply-accumulate instruction to generate an
+ incorrect result. The details are quite complex and hard to
+ determine statically, since branches in the code may exist in some
+ circumstances, but all cases end with a memory (load, store, or
+ prefetch) instruction followed immediately by the multiply-accumulate
+ operation. We employ a linker patching technique by moving the potentially
+ affected multiply-accumulate instruction into a patch region and replacing
+ the original instruction with a branch to the patch. This function checks
+ if INSN_1 is the memory operation followed by a multiply-accumulate
+ operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
+ if INSN_1 and INSN_2 are safe. */
+
+static bfd_boolean
+aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
+{
+ uint32_t rt;
+ uint32_t rtn;
+ uint32_t rn;
+ uint32_t rm;
+ uint32_t ra;
+ bfd_boolean pair;
+ bfd_boolean load;
+
+ if (aarch64_mlxl_p (insn_2)
+ && aarch64_mem_op_p (insn_1, &rt, &rtn, &pair, &load))
+ {
+ /* Any SIMD memory op is independent of the subsequent MLA
+ by definition of the erratum. */
+ if (AARCH64_BIT (insn_1, 26))
+ return TRUE;
+
+ /* If not SIMD, check for integer memory ops and MLA relationship. */
+ rn = AARCH64_RN (insn_2);
+ ra = AARCH64_RA (insn_2);
+ rm = AARCH64_RM (insn_2);
+
+ /* If this is a load and there's a true (RAW) dependency, we are safe
+ and this is not an erratum sequence. */
+ if (load &&
+ (rt == rn || rt == rm || rt == ra
+ || (pair && (rtn == rn || rtn == rm || rtn == ra))))
+ return FALSE;
+
+ /* We conservatively put out stubs for all other cases (including
+ writebacks). */
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
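Two hand-assembled pairs (illustrative only) show how the check above behaves; the register comparison is exactly the RAW-dependency test in the code.

/* Illustrative only -- these encodings are not from the patch.

     ldp  x1, x2, [x3]       (0xa9400861)
     madd x0, x4, x5, x6     (0x9b051880)

   aarch64_erratum_sequence () returns TRUE: an integer load followed by a
   multiply-accumulate that reads none of the just-loaded registers, so a
   veneer would be emitted for the madd.

     ldp  x1, x2, [x3]       (0xa9400861)
     madd x0, x1, x4, x5     (0x9b041420)

   Returns FALSE: the multiply-accumulate consumes x1, which the load has
   just written (a true/RAW dependency), so the sequence is considered
   safe and no veneer is needed.  */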
+static bfd_boolean
+erratum_835769_scan (bfd *input_bfd,
+ struct bfd_link_info *info,
+ struct aarch64_erratum_835769_fix **fixes_p,
+ unsigned int *num_fixes_p,
+ unsigned int *fix_table_size_p)
+{
+ asection *section;
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+ struct aarch64_erratum_835769_fix *fixes = *fixes_p;
+ unsigned int num_fixes = *num_fixes_p;
+ unsigned int fix_table_size = *fix_table_size_p;
+
+ if (htab == NULL)
+ return FALSE;
+
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ bfd_byte *contents = NULL;
+ struct _aarch64_elf_section_data *sec_data;
+ unsigned int span;
+
+ if (elf_section_type (section) != SHT_PROGBITS
+ || (elf_section_flags (section) & SHF_EXECINSTR) == 0
+ || (section->flags & SEC_EXCLUDE) != 0
+ || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
+ || (section->output_section == bfd_abs_section_ptr))
+ continue;
+
+ if (elf_section_data (section)->this_hdr.contents != NULL)
+ contents = elf_section_data (section)->this_hdr.contents;
+ else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
+ return TRUE;
+
+ sec_data = elf_aarch64_section_data (section);
+ for (span = 0; span < sec_data->mapcount; span++)
+ {
+ unsigned int span_start = sec_data->map[span].vma;
+ unsigned int span_end = ((span == sec_data->mapcount - 1)
+ ? sec_data->map[0].vma + section->size
+ : sec_data->map[span + 1].vma);
+ unsigned int i;
+ char span_type = sec_data->map[span].type;
+
+ if (span_type == 'd')
+ continue;
+
+ for (i = span_start; i + 4 < span_end; i += 4)
+ {
+ uint32_t insn_1 = bfd_getl32 (contents + i);
+ uint32_t insn_2 = bfd_getl32 (contents + i + 4);
+
+ if (aarch64_erratum_sequence (insn_1, insn_2))
+ {
+ char *stub_name = NULL;
+ stub_name = (char *) bfd_malloc
+ (strlen ("__erratum_835769_veneer_") + 16);
+ if (stub_name != NULL)
+ sprintf
+ (stub_name, "__erratum_835769_veneer_%d", num_fixes);
+ else
+ return TRUE;
+
+ if (num_fixes == fix_table_size)
+ {
+ fix_table_size *= 2;
+ fixes =
+ (struct aarch64_erratum_835769_fix *)
+ bfd_realloc (fixes,
+ sizeof (struct aarch64_erratum_835769_fix)
+ * fix_table_size);
+ if (fixes == NULL)
+ return TRUE;
+ }
+
+ fixes[num_fixes].input_bfd = input_bfd;
+ fixes[num_fixes].section = section;
+ fixes[num_fixes].offset = i + 4;
+ fixes[num_fixes].veneered_insn = insn_2;
+ fixes[num_fixes].stub_name = stub_name;
+ fixes[num_fixes].stub_type = aarch64_stub_erratum_835769_veneer;
+ num_fixes++;
+ }
+ }
+ }
+ if (elf_section_data (section)->this_hdr.contents == NULL)
+ free (contents);
+ }
+
+ *fixes_p = fixes;
+ *num_fixes_p = num_fixes;
+ *fix_table_size_p = fix_table_size;
+ return FALSE;
+}
+
+/* Find or create a stub section. Returns a pointer to the stub section, and
+ the section to which the stub section will be attached (in *LINK_SEC_P).
+ LINK_SEC_P may be NULL. */
+
+static asection *
+elf_aarch64_create_or_find_stub_sec (asection **link_sec_p, asection *section,
+ struct elf_aarch64_link_hash_table *htab)
+{
+ asection *link_sec;
+ asection *stub_sec;
+
+ link_sec = htab->stub_group[section->id].link_sec;
+ BFD_ASSERT (link_sec != NULL);
+ stub_sec = htab->stub_group[section->id].stub_sec;
+
+ if (stub_sec == NULL)
+ {
+ stub_sec = htab->stub_group[link_sec->id].stub_sec;
+ if (stub_sec == NULL)
+ {
+ size_t namelen;
+ bfd_size_type len;
+ char *s_name;
+
+ namelen = strlen (link_sec->name);
+ len = namelen + sizeof (STUB_SUFFIX);
+ s_name = (char *) bfd_alloc (htab->stub_bfd, len);
+ if (s_name == NULL)
+ return NULL;
+
+ memcpy (s_name, link_sec->name, namelen);
+ memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
+ stub_sec = (*htab->add_stub_section) (s_name, link_sec);
+
+ if (stub_sec == NULL)
+ return NULL;
+ htab->stub_group[link_sec->id].stub_sec = stub_sec;
+ }
+ htab->stub_group[section->id].stub_sec = stub_sec;
+ }
+
+ if (link_sec_p)
+ *link_sec_p = link_sec;
+
+ return stub_sec;
+}
+
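The stub section name is simply the link section's name with STUB_SUFFIX appended; a tiny self-contained sketch of that construction (the ".text" name is only an example):

#include <stdio.h>
#include <string.h>

#define STUB_SUFFIX ".stub"

int
main (void)
{
  const char *link_sec_name = ".text";   /* example link section */
  size_t namelen = strlen (link_sec_name);
  char s_name[64];

  /* Same construction as above: name + suffix, NUL copied via sizeof.  */
  memcpy (s_name, link_sec_name, namelen);
  memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
  printf ("%s\n", s_name);               /* prints ".text.stub" */
  return 0;
}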
/* Determine and set the size of the stub section for a final link.
The basic idea here is to examine all the relocations looking for
@@ -2660,6 +3100,21 @@ elfNN_aarch64_size_stubs (bfd *output_bfd,
bfd_boolean stubs_always_before_branch;
bfd_boolean stub_changed = 0;
struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+ struct aarch64_erratum_835769_fix *erratum_835769_fixes = NULL;
+ unsigned int num_erratum_835769_fixes = 0;
+ unsigned int erratum_835769_fix_table_size = 10;
+ unsigned int i;
+
+ if (htab->fix_erratum_835769)
+ {
+ erratum_835769_fixes
+ = (struct aarch64_erratum_835769_fix *)
+ bfd_zmalloc
+ (sizeof (struct aarch64_erratum_835769_fix) *
+ erratum_835769_fix_table_size);
+ if (erratum_835769_fixes == NULL)
+ goto error_ret_free_local;
+ }
/* Propagate mach to stub bfd, because it may not have been
finalized when we created stub_bfd. */
@@ -2690,7 +3145,9 @@ elfNN_aarch64_size_stubs (bfd *output_bfd,
bfd *input_bfd;
unsigned int bfd_indx;
asection *stub_sec;
+ unsigned prev_num_erratum_835769_fixes = num_erratum_835769_fixes;
+ num_erratum_835769_fixes = 0;
for (input_bfd = info->input_bfds, bfd_indx = 0;
input_bfd != NULL; input_bfd = input_bfd->link.next, bfd_indx++)
{
@@ -2943,8 +3400,20 @@ elfNN_aarch64_size_stubs (bfd *output_bfd,
if (elf_section_data (section)->relocs == NULL)
free (internal_relocs);
}
+
+ if (htab->fix_erratum_835769)
+ {
+ /* Scan for sequences which might trigger erratum 835769. */
+ if (erratum_835769_scan (input_bfd, info, &erratum_835769_fixes,
+ &num_erratum_835769_fixes,
+ &erratum_835769_fix_table_size) != 0)
+ goto error_ret_free_local;
+ }
}
+ if (prev_num_erratum_835769_fixes != num_erratum_835769_fixes)
+ stub_changed = TRUE;
+
if (!stub_changed)
break;
@@ -2952,15 +3421,76 @@ elfNN_aarch64_size_stubs (bfd *output_bfd,
stub sections. */
for (stub_sec = htab->stub_bfd->sections;
stub_sec != NULL; stub_sec = stub_sec->next)
- stub_sec->size = 0;
+ {
+ /* Ignore non-stub sections. */
+ if (!strstr (stub_sec->name, STUB_SUFFIX))
+ continue;
+ stub_sec->size = 0;
+ }
bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
+ /* Add erratum 835769 veneers to stub section sizes too. */
+ if (htab->fix_erratum_835769)
+ for (i = 0; i < num_erratum_835769_fixes; i++)
+ {
+ stub_sec = elf_aarch64_create_or_find_stub_sec (NULL,
+ erratum_835769_fixes[i].section, htab);
+
+ if (stub_sec == NULL)
+ goto error_ret_free_local;
+
+ stub_sec->size += 8;
+ }
+
/* Ask the linker to do its stuff. */
(*htab->layout_sections_again) ();
stub_changed = FALSE;
}
+ /* Add stubs for erratum 835769 fixes now. */
+ if (htab->fix_erratum_835769)
+ {
+ for (i = 0; i < num_erratum_835769_fixes; i++)
+ {
+ struct elf_aarch64_stub_hash_entry *stub_entry;
+ char *stub_name = erratum_835769_fixes[i].stub_name;
+ asection *section = erratum_835769_fixes[i].section;
+ unsigned int section_id = erratum_835769_fixes[i].section->id;
+ asection *link_sec = htab->stub_group[section_id].link_sec;
+ asection *stub_sec = htab->stub_group[section_id].stub_sec;
+
+ stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
+ stub_name, TRUE, FALSE);
+ if (stub_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
+ section->owner,
+ stub_name);
+ return FALSE;
+ }
+
+ stub_entry->stub_sec = stub_sec;
+ stub_entry->stub_offset = 0;
+ stub_entry->id_sec = link_sec;
+ stub_entry->stub_type = erratum_835769_fixes[i].stub_type;
+ stub_entry->target_section = section;
+ stub_entry->target_value = erratum_835769_fixes[i].offset;
+ stub_entry->veneered_insn = erratum_835769_fixes[i].veneered_insn;
+ stub_entry->output_name = erratum_835769_fixes[i].stub_name;
+ }
+
+ /* Stash the erratum 835769 fix array for use later in
+ elfNN_aarch64_write_section(). */
+ htab->aarch64_erratum_835769_fixes = erratum_835769_fixes;
+ htab->num_aarch64_erratum_835769_fixes = num_erratum_835769_fixes;
+ }
+ else
+ {
+ htab->aarch64_erratum_835769_fixes = NULL;
+ htab->num_aarch64_erratum_835769_fixes = 0;
+ }
+
return TRUE;
error_ret_free_local:
@@ -3053,7 +3583,7 @@ bfd_elfNN_aarch64_init_maps (bfd *abfd)
return;
if ((abfd->flags & DYNAMIC) != 0)
- return;
+ return;
hdr = &elf_symtab_hdr (abfd);
localsyms = hdr->sh_info;
@@ -3091,12 +3621,14 @@ void
bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
struct bfd_link_info *link_info,
int no_enum_warn,
- int no_wchar_warn, int pic_veneer)
+ int no_wchar_warn, int pic_veneer,
+ int fix_erratum_835769)
{
struct elf_aarch64_link_hash_table *globals;
globals = elf_aarch64_hash_table (link_info);
globals->pic_veneer = pic_veneer;
+ globals->fix_erratum_835769 = fix_erratum_835769;
BFD_ASSERT (is_aarch64_elf (output_bfd));
elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
@@ -3406,6 +3938,89 @@ symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
return value;
}
+/* Data for make_branch_to_erratum_835769_stub(). */
+
+struct erratum_835769_branch_to_stub_data
+{
+ asection *output_section;
+ bfd_byte *contents;
+};
+
+/* Helper to insert branches to erratum 835769 stubs in the right
+ places for a particular section. */
+
+static bfd_boolean
+make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
+ void *in_arg)
+{
+ struct elf_aarch64_stub_hash_entry *stub_entry;
+ struct erratum_835769_branch_to_stub_data *data;
+ bfd_byte *contents;
+ unsigned long branch_insn = 0;
+ bfd_vma veneered_insn_loc, veneer_entry_loc;
+ bfd_signed_vma branch_offset;
+ unsigned int target;
+ bfd *abfd;
+
+ stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
+ data = (struct erratum_835769_branch_to_stub_data *) in_arg;
+
+ if (stub_entry->target_section != data->output_section
+ || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
+ return TRUE;
+
+ contents = data->contents;
+ veneered_insn_loc = stub_entry->target_section->output_section->vma
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_value;
+ veneer_entry_loc = stub_entry->stub_sec->output_section->vma
+ + stub_entry->stub_sec->output_offset
+ + stub_entry->stub_offset;
+ branch_offset = veneer_entry_loc - veneered_insn_loc;
+
+ abfd = stub_entry->target_section->owner;
+ if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
+ (*_bfd_error_handler)
+ (_("%B: error: Erratum 835769 stub out "
+ "of range (input file too large)"), abfd);
+
+ target = stub_entry->target_value;
+ branch_insn = 0x14000000;
+ branch_offset >>= 2;
+ branch_offset &= 0x3ffffff;
+ branch_insn |= branch_offset;
+ bfd_putl32 (branch_insn, &contents[target]);
+
+ return TRUE;
+}
+
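A hand-worked pair of addresses (purely hypothetical) ties the two branch directions together: the forward branch written here over the original instruction, and the backward branch placed in the stub by aarch64_build_one_stub.

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t insn_loc   = 0x400100;   /* hypothetical address of the madd  */
  uint64_t veneer_loc = 0x400200;   /* hypothetical address of its stub  */

  /* Forward branch written over the original instruction: +0x100.  */
  uint32_t to_stub = 0x14000000
                     | (uint32_t) (((veneer_loc - insn_loc) >> 2) & 0x3ffffff);
  assert (to_stub == 0x14000040);

  /* Second stub word: branches back past the copied instruction.  The
     offset is taken from the veneer entry, as in aarch64_build_one_stub,
     and resolves to insn_loc + 4.  */
  uint32_t back = 0x14000000
                  | (uint32_t) (((insn_loc - veneer_loc) >> 2) & 0x3ffffff);
  assert (back == 0x17ffffc0);
  return 0;
}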
+static bfd_boolean
+elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *link_info,
+ asection *sec,
+ bfd_byte *contents)
+
+{
+ struct elf_aarch64_link_hash_table *globals =
+ elf_aarch64_hash_table (link_info);
+
+ if (globals == NULL)
+ return FALSE;
+
+ /* Fix code to point to erratum 835769 stubs. */
+ if (globals->fix_erratum_835769)
+ {
+ struct erratum_835769_branch_to_stub_data data;
+
+ data.output_section = sec;
+ data.contents = contents;
+ bfd_hash_traverse (&globals->stub_hash_table,
+ make_branch_to_erratum_835769_stub, &data);
+ }
+
+ return FALSE;
+}
+
/* Perform a relocation as part of a final link. */
static bfd_reloc_status_type
elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
@@ -5748,6 +6363,13 @@ aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
return FALSE;
break;
+ case aarch64_stub_erratum_835769_veneer:
+ if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
+ sizeof (aarch64_erratum_835769_stub)))
+ return FALSE;
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
+ return FALSE;
+ break;
default:
BFD_FAIL ();
}
@@ -6443,6 +7065,16 @@ elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
}
}
+ /* Init mapping symbol information to use later to distinguish
+ between code and data while scanning for erratum 835769. */
+ if (htab->fix_erratum_835769)
+ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
+ {
+ if (!is_aarch64_elf (ibfd))
+ continue;
+ bfd_elfNN_aarch64_init_maps (ibfd);
+ }
+
/* We now have determined the sizes of the various dynamic sections.
Allocate memory for them. */
relocs = FALSE;
@@ -7286,6 +7918,9 @@ const struct elf_size_info elfNN_aarch64_size_info =
#define elf_backend_size_info \
elfNN_aarch64_size_info
+#define elf_backend_write_section \
+ elfNN_aarch64_write_section
+
#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#define elf_backend_plt_readonly 1