author     Marcus Shawcroft <marcus.shawcroft@arm.com>  2015-02-24 12:04:41 +0000
committer  Marcus Shawcroft <marcus.shawcroft@arm.com>  2015-04-01 13:16:38 +0100
commit     4106101c449e53dd6b61ec824b196f84b3f3daa5 (patch)
tree       4adf977e421b6453ead4a941effac892d0cffa87 /bfd/elfxx-aarch64.c
parent     cf39cfc52ebd683d55fc396a77355f34b5094c04 (diff)
[AArch64] Workaround for Cortex A53 erratum 843419
Some early revisions of the Cortex-A53 have an erratum (843419). The details of the erratum are quite complex and involve dynamic conditions. For the purposes of the workaround we have simplified the static conditions to an ADRP in the last two instructions of a 4KByte page, followed within four instructions by a load/store dependent on the ADRP.

This patch adds support to conservatively scan for and work around Cortex-A53 erratum 843419.

Two different workaround strategies are used. The first is to rewrite an ADRP instruction which forms part of an erratum sequence as an ADR instruction. In situations where the ADR provides insufficient offset, the dependent load or store instruction from the sequence is instead moved to a stub section, and branches are inserted from the original sequence to the relocated instruction and back again.

Stub section sizes are rounded up to a multiple of 4096 in order to ensure that the act of inserting workaround stubs does not itself create more erratum sequences. Workaround stubs are always inserted into the stub section associated with the input section containing the erratum sequence; this ensures that the fully relocated form of the veneered load/store instruction is available at the point in time when the stub section is written.
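To make the simplified static condition above concrete, here is a minimal C sketch of the kind of check a scanner would perform. This is illustrative only and is not the code added by this patch; the function names and the caller-supplied uses_base_reg predicate (which would classify the dependent load/store) are hypothetical.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative sketch: does INSNS[0], the instruction at ADRP_PC, start a
       conservative erratum 843419 candidate sequence?  INSNS holds the ADRP
       and the next four instructions.  */

    static bool
    insn_is_adrp (uint32_t insn)
    {
      /* ADRP: bit 31 set and bits [28:24] == 0b10000.  */
      return (insn & 0x9f000000u) == 0x90000000u;
    }

    static bool
    erratum_843419_candidate_p (uint64_t adrp_pc, const uint32_t insns[5],
                                bool (*uses_base_reg) (uint32_t insn, unsigned reg))
    {
      /* The ADRP must occupy one of the last two instruction slots of a
         4KByte page, i.e. page offset 0xff8 or 0xffc.  */
      unsigned page_off = adrp_pc & 0xfff;
      if (page_off != 0xff8 && page_off != 0xffc)
        return false;

      if (!insn_is_adrp (insns[0]))
        return false;

      /* The ADRP destination register is in bits [4:0].  */
      unsigned rd = insns[0] & 0x1f;

      /* A load/store within the next four instructions that uses RD as its
         base register completes the candidate sequence.  */
      for (int i = 1; i <= 4; i++)
        if (uses_base_reg (insns[i], rd))
          return true;

      return false;
    }

The actual scan in the linker is more involved (it also classifies the intervening instructions and the dependent memory operation itself); this sketch only captures the page-offset and proximity conditions stated above.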
Diffstat (limited to 'bfd/elfxx-aarch64.c')
-rw-r--r--  bfd/elfxx-aarch64.c  29
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/bfd/elfxx-aarch64.c b/bfd/elfxx-aarch64.c
index b513a54..db9d4fa 100644
--- a/bfd/elfxx-aarch64.c
+++ b/bfd/elfxx-aarch64.c
@@ -25,6 +25,26 @@
#define MASK(n) ((1u << (n)) - 1)
+/* Sign-extend VALUE, which has the indicated number of BITS. */
+
+bfd_signed_vma
+_bfd_aarch64_sign_extend (bfd_vma value, int bits)
+{
+ if (value & ((bfd_vma) 1 << (bits - 1)))
+ /* VALUE is negative. */
+ value |= ((bfd_vma) - 1) << bits;
+
+ return value;
+}
+
+/* Decode the IMM field of ADRP. */
+
+uint32_t
+_bfd_aarch64_decode_adrp_imm (uint32_t insn)
+{
+ return (((insn >> 5) & MASK (19)) << 2) | ((insn >> 29) & MASK (2));
+}
+
/* Reencode the imm field of add immediate. */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
@@ -32,9 +52,10 @@ reencode_add_imm (uint32_t insn, uint32_t imm)
return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}
-/* Reencode the imm field of adr. */
-static inline uint32_t
-reencode_adr_imm (uint32_t insn, uint32_t imm)
+/* Reencode the IMM field of ADR. */
+
+uint32_t
+_bfd_aarch64_reencode_adr_imm (uint32_t insn, uint32_t imm)
{
return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
| ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
@@ -220,7 +241,7 @@ _bfd_aarch64_elf_put_addend (bfd *abfd,
case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
- contents = reencode_adr_imm (contents, addend);
+ contents = _bfd_aarch64_reencode_adr_imm (contents, addend);
break;
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
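
A closing note on the helpers exported by this patch: ADR and ADRP share the same 21-bit immediate encoding, with immlo in bits [30:29] and immhi in bits [23:5] of the instruction word, which is what _bfd_aarch64_decode_adrp_imm and _bfd_aarch64_reencode_adr_imm unpack and pack, while _bfd_aarch64_sign_extend turns the decoded field into a signed offset. For ADR the immediate is a byte offset (reach of roughly +/-1MiB), while for ADRP it is a 4KByte-page offset (reach of roughly +/-4GiB), which is why the ADRP-to-ADR rewrite described in the commit message is only possible when the target page lies close to the erratum site. The standalone fragment below is a sketch of that range check only, not the linker's actual decision logic; the names and arithmetic are illustrative.

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: can an ADRP at ADRP_PC, whose fully resolved target
       page is TARGET_PAGE (a 4KByte-aligned address), be rewritten as an ADR
       that materialises the same value?  ADR adds a signed 21-bit byte offset
       to the PC, so the rewrite only works within roughly +/-1MiB.  */

    static bool
    adrp_to_adr_possible (uint64_t adrp_pc, uint64_t target_page,
                          int32_t *byte_off)
    {
      int64_t off = (int64_t) (target_page - adrp_pc);

      if (off < -(1 << 20) || off > (1 << 20) - 1)
        return false;

      /* OFF is what would be written back into the shared immlo/immhi field
         (e.g. via _bfd_aarch64_reencode_adr_imm), after clearing bit 31 to
         turn the ADRP opcode into ADR.  */
      *byte_off = (int32_t) off;
      return true;
    }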