author     Marcus Shawcroft <marcus.shawcroft@arm.com>  2015-03-12 12:11:07 +0000
committer  Marcus Shawcroft <marcus.shawcroft@arm.com>  2015-03-23 14:45:37 +0000
commit     3d14faea41888606f41466655aa1f0c6f0acf7e0 (patch)
tree       483cff3aa8d71954383387873b01ef28ef0f2982 /bfd
parent     f872121a87fe4c16ec43623b75804b39612e62ef (diff)
[AArch64] Tidy up in aarch64_mem_op_p().
Diffstat (limited to 'bfd')
-rw-r--r--  bfd/ChangeLog         5
-rw-r--r--  bfd/elfnn-aarch64.c  45
2 files changed, 31 insertions, 19 deletions
diff --git a/bfd/ChangeLog b/bfd/ChangeLog
index bd8d79d..7694d62 100644
--- a/bfd/ChangeLog
+++ b/bfd/ChangeLog
@@ -1,5 +1,10 @@
2015-03-23 Marcus Shawcroft <marcus.shawcroft@arm.com>
+ (aarch64_mem_op_p): Update comment. Rename rtn to rt2.
+ (aarch64_erratum_sequence): Rename rtn to rt2.
+
+2015-03-23 Marcus Shawcroft <marcus.shawcroft@arm.com>
+
* elfnn-aarch64.c (elfNN_aarch64_write_section): Adjust layout.
2015-03-19 Nick Clifton <nickc@redhat.com>
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index 4e85fd0..2073ed4 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -2755,12 +2755,19 @@ group_sections (struct elf_aarch64_link_hash_table *htab,
#define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
#define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
-/* Classify an INSN if it is indeed a load/store. Return TRUE if INSN
- is a load/store along with the Rt and Rtn. Return FALSE if not a
- load/store. */
+/* Classify an INSN as a load/store if it is indeed one.
+
+ Return TRUE if INSN is a LD/ST instruction, otherwise return FALSE.
+
+ For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
+ is set equal to RT.
+
+ For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
+
+ */
static bfd_boolean
-aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn,
+aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
bfd_boolean *pair, bfd_boolean *load)
{
uint32_t opcode;
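
As a rough illustration of the contract described in the new comment (a hypothetical helper, not part of this patch): because RT2 is folded back to RT for scalar accesses, a caller only needs to look at RT2 when PAIR is set.

/* Hypothetical helper: return TRUE if INSN is a load that writes REG,
   relying only on the aarch64_mem_op_p contract documented above.  */
static bfd_boolean
load_writes_reg_p (uint32_t insn, unsigned int reg)
{
  unsigned int rt, rt2;
  bfd_boolean pair, load;

  if (!aarch64_mem_op_p (insn, &rt, &rt2, &pair, &load) || !load)
    return FALSE;

  return rt == reg || (pair && rt2 == reg);
}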
@@ -2779,11 +2786,11 @@ aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn,
if (AARCH64_LDST_EX (insn))
{
*rt = AARCH64_RT (insn);
- *rtn = *rt;
+ *rt2 = *rt;
if (AARCH64_BIT (insn, 21) == 1)
{
*pair = TRUE;
- *rtn = AARCH64_RT2 (insn);
+ *rt2 = AARCH64_RT2 (insn);
}
*load = AARCH64_LD (insn);
return TRUE;
@@ -2795,7 +2802,7 @@ aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn,
{
*pair = TRUE;
*rt = AARCH64_RT (insn);
- *rtn = AARCH64_RT2 (insn);
+ *rt2 = AARCH64_RT2 (insn);
*load = AARCH64_LD (insn);
return TRUE;
}
@@ -2808,7 +2815,7 @@ aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn,
|| AARCH64_LDST_UIMM (insn))
{
*rt = AARCH64_RT (insn);
- *rtn = *rt;
+ *rt2 = *rt;
if (AARCH64_LDST_PCREL (insn))
*load = TRUE;
opc = AARCH64_BITS (insn, 22, 2);
@@ -2828,21 +2835,21 @@ aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn,
{
case 0:
case 2:
- *rtn = *rt + 3;
+ *rt2 = *rt + 3;
break;
case 4:
case 6:
- *rtn = *rt + 2;
+ *rt2 = *rt + 2;
break;
case 7:
- *rtn = *rt;
+ *rt2 = *rt;
break;
case 8:
case 10:
- *rtn = *rt + 1;
+ *rt2 = *rt + 1;
break;
default:
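
The offsets in the switch above reflect how many consecutive SIMD registers each multiple-structure form transfers, so RT2 ends up naming the last register of the group. A hedged restatement of that mapping (illustrative only, not part of the patch):

/* Illustrative only: register count implied by the opcode values
   handled above, e.g. the four-register forms give RT2 = RT + 3.  */
static unsigned int
simd_multi_struct_nregs (uint32_t opcode)
{
  switch (opcode)
    {
    case 0:
    case 2:
      return 4;  /* four-register forms.  */
    case 4:
    case 6:
      return 3;  /* three-register forms.  */
    case 7:
      return 1;  /* single-register form.  */
    case 8:
    case 10:
      return 2;  /* two-register forms.  */
    default:
      return 0;  /* not a recognised encoding.  */
    }
}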
@@ -2862,21 +2869,21 @@ aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rtn,
case 0:
case 2:
case 4:
- *rtn = *rt + r;
+ *rt2 = *rt + r;
break;
case 1:
case 3:
case 5:
- *rtn = *rt + (r == 0 ? 2 : 3);
+ *rt2 = *rt + (r == 0 ? 2 : 3);
break;
case 6:
- *rtn = *rt + r;
+ *rt2 = *rt + r;
break;
case 7:
- *rtn = *rt + (r == 0 ? 2 : 3);
+ *rt2 = *rt + (r == 0 ? 2 : 3);
break;
default:
@@ -2922,7 +2929,7 @@ static bfd_boolean
aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
{
uint32_t rt;
- uint32_t rtn;
+ uint32_t rt2;
uint32_t rn;
uint32_t rm;
uint32_t ra;
@@ -2930,7 +2937,7 @@ aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
bfd_boolean load;
if (aarch64_mlxl_p (insn_2)
- && aarch64_mem_op_p (insn_1, &rt, &rtn, &pair, &load))
+ && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
{
/* Any SIMD memory op is independent of the subsequent MLA
by definition of the erratum. */
@@ -2946,7 +2953,7 @@ aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
and this is not an erratum sequence. */
if (load &&
(rt == rn || rt == rm || rt == ra
- || (pair && (rtn == rn || rtn == rm || rtn == ra))))
+ || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
return FALSE;
/* We conservatively put out stubs for all other cases (including
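
To round out the picture (a hypothetical sketch, not from the patch): aarch64_erratum_sequence is a per-pair predicate, so a caller can scan adjacent instruction words along roughly these lines before deciding where workaround stubs are needed.

/* Hypothetical illustration: count candidate erratum sequences in a
   buffer of instruction words by testing each adjacent pair.  */
static unsigned int
count_erratum_candidates (const uint32_t *insns, unsigned int n_insns)
{
  unsigned int i, count = 0;

  for (i = 0; i + 1 < n_insns; i++)
    if (aarch64_erratum_sequence (insns[i], insns[i + 1]))
      count++;

  return count;
}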