author    Richard Sandiford <richard.sandiford@arm.com>  2016-09-21 16:56:15 +0100
committer Richard Sandiford <richard.sandiford@arm.com>  2016-09-21 16:56:15 +0100
commit    98907a704908c5877d929c57b2ddb2e5f899d9a9 (patch)
tree      66e651a02be2e7d48ebf44cb7f1a2865766461d9 /opcodes/aarch64-opc.c
parent    4df068de5214ff55b01ae320ec580f2928eb74e5 (diff)
[AArch64][SVE 26/32] Add SVE MUL VL addressing modes
This patch adds support for addresses of the form:

    [<base>, #<offset>, MUL VL]

This involves adding a new AARCH64_MOD_MUL_VL modifier, which is why
I split it out from the other addressing modes.

For LD2, LD3 and LD4, the offset must be a multiple of the structure
size, so for LD3 the possible values are 0, 3, 6, ....  The patch
therefore extends value_aligned_p to handle non-power-of-2 alignments.

include/
	* opcode/aarch64.h (AARCH64_OPND_SVE_ADDR_RI_S4xVL): New aarch64_opnd.
	(AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, AARCH64_OPND_SVE_ADDR_RI_S4x3xVL)
	(AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, AARCH64_OPND_SVE_ADDR_RI_S6xVL)
	(AARCH64_OPND_SVE_ADDR_RI_S9xVL): Likewise.
	(AARCH64_MOD_MUL_VL): New aarch64_modifier_kind.

opcodes/
	* aarch64-tbl.h (AARCH64_OPERANDS): Add entries for new MUL VL
	operands.
	* aarch64-opc.c (aarch64_operand_modifiers): Initialize the
	AARCH64_MOD_MUL_VL entry.
	(value_aligned_p): Cope with non-power-of-two alignments.
	(operand_general_constraint_met_p): Handle the new MUL VL addresses.
	(print_immediate_offset_address): Likewise.
	(aarch64_print_operand): Likewise.
	* aarch64-opc-2.c: Regenerate.
	* aarch64-asm.h (ins_sve_addr_ri_s4xvl, ins_sve_addr_ri_s6xvl)
	(ins_sve_addr_ri_s9xvl): New inserters.
	* aarch64-asm.c (aarch64_ins_sve_addr_ri_s4xvl): New function.
	(aarch64_ins_sve_addr_ri_s6xvl): Likewise.
	(aarch64_ins_sve_addr_ri_s9xvl): Likewise.
	* aarch64-asm-2.c: Regenerate.
	* aarch64-dis.h (ext_sve_addr_ri_s4xvl, ext_sve_addr_ri_s6xvl)
	(ext_sve_addr_ri_s9xvl): New extractors.
	* aarch64-dis.c (aarch64_ext_sve_addr_reg_mul_vl): New function.
	(aarch64_ext_sve_addr_ri_s4xvl): Likewise.
	(aarch64_ext_sve_addr_ri_s6xvl): Likewise.
	(aarch64_ext_sve_addr_ri_s9xvl): Likewise.
	* aarch64-dis-2.c: Regenerate.

gas/
	* config/tc-aarch64.c (SHIFTED_NONE, SHIFTED_MUL_VL): New
	parse_shift_modes.
	(parse_shift): Handle SHIFTED_MUL_VL.
	(parse_address_main): Add an imm_shift_mode parameter.
	(parse_address, parse_sve_address): Update accordingly.
	(parse_operands): Handle MUL VL addressing modes.
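As a concrete illustration of the constraint described above, here is a small
stand-alone sketch (not part of the patch; the function name and hard-coded
bounds are purely illustrative) of a MUL VL offset check for a structure load
whose immediate is a 4-bit signed field scaled by the number of elements, so
that for LD3 the accepted offsets are -24, -21, ..., 0, 3, 6, ..., 21:

#include <assert.h>
#include <stdint.h>

/* Illustrative only: a 4-bit signed immediate scaled by NUM (1 for LD1,
   2 for LD2, 3 for LD3, 4 for LD4) must be in range and a multiple of NUM.  */
static int
sve_mul_vl_offset_ok (int64_t imm, int num)
{
  int64_t min_value = -8 * num;   /* 4-bit signed field, scaled.  */
  int64_t max_value = 7 * num;

  return imm >= min_value && imm <= max_value && imm % num == 0;
}

int
main (void)
{
  /* For LD3 (num == 3) the valid offsets are -24, -21, ..., 0, 3, 6, ..., 21.  */
  assert (sve_mul_vl_offset_ok (0, 3));
  assert (sve_mul_vl_offset_ok (6, 3));
  assert (sve_mul_vl_offset_ok (-24, 3));
  assert (!sve_mul_vl_offset_ok (4, 3));    /* not a multiple of 3 */
  assert (!sve_mul_vl_offset_ok (24, 3));   /* out of range */
  return 0;
}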
Diffstat (limited to 'opcodes/aarch64-opc.c')
-rw-r--r--  opcodes/aarch64-opc.c  61
1 file changed, 59 insertions, 2 deletions
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 6617e28..d0959b5 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -365,6 +365,7 @@ const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{"sxtw", 0x6},
{"sxtx", 0x7},
{"mul", 0x0},
+ {"mul vl", 0x0},
{NULL, 0},
};
@@ -486,10 +487,11 @@ value_in_range_p (int64_t value, int low, int high)
return (value >= low && value <= high) ? 1 : 0;
}
+/* Return true if VALUE is a multiple of ALIGN. */
static inline int
value_aligned_p (int64_t value, int align)
{
- return ((value & (align - 1)) == 0) ? 1 : 0;
+ return (value % align) == 0;
}
/* A signed value fits in a field. */
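The reason the modulo form is needed can be seen with a small stand-alone
comparison (illustrative only, not part of the patch): the old mask-based
test is only valid when the alignment is a power of two, and for an
alignment of 3 it would reject 3 and accept 4:

#include <assert.h>
#include <stdint.h>

/* Old, power-of-two-only test.  */
static int
aligned_mask (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}

/* New test, also correct for non-power-of-two alignments such as 3.  */
static int
aligned_mod (int64_t value, int align)
{
  return (value % align) == 0;
}

int
main (void)
{
  /* Both tests agree for power-of-two alignments... */
  assert (aligned_mask (8, 4) == aligned_mod (8, 4));
  assert (aligned_mask (6, 4) == aligned_mod (6, 4));

  /* ...but the mask test misbehaves for align == 3 (LD3 offsets):
     it rejects 3 and accepts 4.  */
  assert (aligned_mask (3, 3) == 0 && aligned_mod (3, 3) == 1);
  assert (aligned_mask (4, 3) == 1 && aligned_mod (4, 3) == 0);
  return 0;
}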
@@ -1666,6 +1668,49 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
}
break;
+ case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
+ min_value = -8;
+ max_value = 7;
+ sve_imm_offset_vl:
+ assert (!opnd->addr.offset.is_reg);
+ assert (opnd->addr.preind);
+ num = 1 + get_operand_specific_data (&aarch64_operands[type]);
+ min_value *= num;
+ max_value *= num;
+ if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
+ || (opnd->shifter.operator_present
+ && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
+ {
+ set_other_error (mismatch_detail, idx,
+ _("invalid addressing mode"));
+ return 0;
+ }
+ if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
+ {
+ set_offset_out_of_range_error (mismatch_detail, idx,
+ min_value, max_value);
+ return 0;
+ }
+ if (!value_aligned_p (opnd->addr.offset.imm, num))
+ {
+ set_unaligned_error (mismatch_detail, idx, num);
+ return 0;
+ }
+ break;
+
+ case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
+ min_value = -32;
+ max_value = 31;
+ goto sve_imm_offset_vl;
+
+ case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
+ min_value = -256;
+ max_value = 255;
+ goto sve_imm_offset_vl;
+
case AARCH64_OPND_SVE_ADDR_RI_U6:
case AARCH64_OPND_SVE_ADDR_RI_U6x2:
case AARCH64_OPND_SVE_ADDR_RI_U6x4:
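Assuming the single-vector forms use a multiplier of 1 (only the LD2/LD3/LD4
variants scale the immediate), the scaled ranges implied by the code above can
be tabulated with a short sketch; the struct and names here are illustrative
labels, not part of the opcodes API:

#include <stdio.h>

/* Illustrative table: a signed immediate of WIDTH bits, multiplied by NUM,
   must lie in [-2^(WIDTH-1) * NUM, (2^(WIDTH-1) - 1) * NUM] and be a
   multiple of NUM.  */
struct vl_offset_range
{
  const char *name;  /* label only, mirrors the operand names above */
  int width;         /* immediate field width in bits */
  int num;           /* structure-size multiplier */
};

int
main (void)
{
  static const struct vl_offset_range table[] = {
    { "SVE_ADDR_RI_S4xVL",   4, 1 },
    { "SVE_ADDR_RI_S4x2xVL", 4, 2 },
    { "SVE_ADDR_RI_S4x3xVL", 4, 3 },
    { "SVE_ADDR_RI_S4x4xVL", 4, 4 },
    { "SVE_ADDR_RI_S6xVL",   6, 1 },  /* multiplier of 1 assumed here */
    { "SVE_ADDR_RI_S9xVL",   9, 1 },  /* multiplier of 1 assumed here */
  };
  for (unsigned int i = 0; i < sizeof table / sizeof table[0]; i++)
    {
      int min = -(1 << (table[i].width - 1)) * table[i].num;
      int max = ((1 << (table[i].width - 1)) - 1) * table[i].num;
      printf ("%-22s offsets %d to %d, multiples of %d\n",
              table[i].name, min, max, table[i].num);
    }
  return 0;
}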
@@ -2645,7 +2690,13 @@ print_immediate_offset_address (char *buf, size_t size,
}
else
{
- if (opnd->addr.offset.imm)
+ if (opnd->shifter.operator_present)
+ {
+ assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
+ snprintf (buf, size, "[%s,#%d,mul vl]",
+ base, opnd->addr.offset.imm);
+ }
+ else if (opnd->addr.offset.imm)
snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
else
snprintf (buf, size, "[%s]", base);
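For reference, with base register "x0" and an immediate of 3 the new branch
above formats the address as "[x0,#3,mul vl]"; a minimal stand-alone sketch of
the same snprintf call (buffer name and size are arbitrary):

#include <stdio.h>

int
main (void)
{
  char buf[32];
  const char *base = "x0";
  int imm = 3;

  /* Mirrors the formatting used in the new MUL VL branch above.  */
  snprintf (buf, sizeof buf, "[%s,#%d,mul vl]", base, imm);
  printf ("%s\n", buf);   /* prints [x0,#3,mul vl] */
  return 0;
}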
@@ -3114,6 +3165,12 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
case AARCH64_OPND_ADDR_SIMM7:
case AARCH64_OPND_ADDR_SIMM9:
case AARCH64_OPND_ADDR_SIMM9_2:
+ case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
case AARCH64_OPND_SVE_ADDR_RI_U6:
case AARCH64_OPND_SVE_ADDR_RI_U6x2:
case AARCH64_OPND_SVE_ADDR_RI_U6x4: