author     Richard Sandiford <richard.sandiford@arm.com>   2016-09-21 16:56:15 +0100
committer  Richard Sandiford <richard.sandiford@arm.com>   2016-09-21 16:56:15 +0100
commit     98907a704908c5877d929c57b2ddb2e5f899d9a9 (patch)
tree       66e651a02be2e7d48ebf44cb7f1a2865766461d9 /opcodes
parent     4df068de5214ff55b01ae320ec580f2928eb74e5 (diff)
[AArch64][SVE 26/32] Add SVE MUL VL addressing modes
This patch adds support for addresses of the form:

    [<base>, #<offset>, MUL VL]

This involves adding a new AARCH64_MOD_MUL_VL modifier, which is why
I split it out from the other addressing modes.

For LD2, LD3 and LD4, the offset must be a multiple of the structure
size, so for LD3 the possible values are 0, 3, 6, ....  The patch
therefore extends value_aligned_p to handle non-power-of-2 alignments.

include/
        * opcode/aarch64.h (AARCH64_OPND_SVE_ADDR_RI_S4xVL): New aarch64_opnd.
        (AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, AARCH64_OPND_SVE_ADDR_RI_S4x3xVL)
        (AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, AARCH64_OPND_SVE_ADDR_RI_S6xVL)
        (AARCH64_OPND_SVE_ADDR_RI_S9xVL): Likewise.
        (AARCH64_MOD_MUL_VL): New aarch64_modifier_kind.

opcodes/
        * aarch64-tbl.h (AARCH64_OPERANDS): Add entries for new MUL VL
        operands.
        * aarch64-opc.c (aarch64_operand_modifiers): Initialize the
        AARCH64_MOD_MUL_VL entry.
        (value_aligned_p): Cope with non-power-of-two alignments.
        (operand_general_constraint_met_p): Handle the new MUL VL addresses.
        (print_immediate_offset_address): Likewise.
        (aarch64_print_operand): Likewise.
        * aarch64-opc-2.c: Regenerate.
        * aarch64-asm.h (ins_sve_addr_ri_s4xvl, ins_sve_addr_ri_s6xvl)
        (ins_sve_addr_ri_s9xvl): New inserters.
        * aarch64-asm.c (aarch64_ins_sve_addr_ri_s4xvl): New function.
        (aarch64_ins_sve_addr_ri_s6xvl): Likewise.
        (aarch64_ins_sve_addr_ri_s9xvl): Likewise.
        * aarch64-asm-2.c: Regenerate.
        * aarch64-dis.h (ext_sve_addr_ri_s4xvl, ext_sve_addr_ri_s6xvl)
        (ext_sve_addr_ri_s9xvl): New extractors.
        * aarch64-dis.c (aarch64_ext_sve_addr_reg_mul_vl): New function.
        (aarch64_ext_sve_addr_ri_s4xvl): Likewise.
        (aarch64_ext_sve_addr_ri_s6xvl): Likewise.
        (aarch64_ext_sve_addr_ri_s9xvl): Likewise.
        * aarch64-dis-2.c: Regenerate.

gas/
        * config/tc-aarch64.c (SHIFTED_NONE, SHIFTED_MUL_VL): New
        parse_shift_modes.
        (parse_shift): Handle SHIFTED_MUL_VL.
        (parse_address_main): Add an imm_shift_mode parameter.
        (parse_address, parse_sve_address): Update accordingly.
        (parse_operands): Handle MUL VL addressing modes.
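As a standalone illustration (not part of the patch), the following sketch shows why the old mask-based alignment test cannot handle the LD3 case: the offset must be a multiple of 3, which is not a power of two, so a modulo test is needed.

/* Illustrative sketch only: checking an LD3-style MUL VL offset.  The
   mask test (value & (align - 1)) only works for power-of-two ALIGN.  */
#include <stdio.h>

static int is_multiple_of (long value, int align)
{
  return value % align == 0;
}

int main (void)
{
  /* For LD3 the valid MUL VL offsets are ..., -3, 0, 3, 6, ...  */
  printf ("%d %d %d\n",
          is_multiple_of (6, 3),    /* 1: valid LD3 offset */
          is_multiple_of (4, 3),    /* 0: correctly rejected */
          (4 & (3 - 1)) == 0);      /* 1: the mask test wrongly accepts 4 */
  return 0;
}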
Diffstat (limited to 'opcodes')
-rw-r--r--   opcodes/ChangeLog         25
-rw-r--r--   opcodes/aarch64-asm-2.c   45
-rw-r--r--   opcodes/aarch64-asm.c     50
-rw-r--r--   opcodes/aarch64-asm.h      3
-rw-r--r--   opcodes/aarch64-dis-2.c   45
-rw-r--r--   opcodes/aarch64-dis.c     72
-rw-r--r--   opcodes/aarch64-dis.h      3
-rw-r--r--   opcodes/aarch64-opc-2.c    6
-rw-r--r--   opcodes/aarch64-opc.c     61
-rw-r--r--   opcodes/aarch64-tbl.h     18
10 files changed, 290 insertions, 38 deletions
diff --git a/opcodes/ChangeLog b/opcodes/ChangeLog
index 2cedfb9..de052a9 100644
--- a/opcodes/ChangeLog
+++ b/opcodes/ChangeLog
@@ -1,5 +1,30 @@
2016-09-21 Richard Sandiford <richard.sandiford@arm.com>
+ * aarch64-tbl.h (AARCH64_OPERANDS): Add entries for new MUL VL
+ operands.
+ * aarch64-opc.c (aarch64_operand_modifiers): Initialize
+ the AARCH64_MOD_MUL_VL entry.
+ (value_aligned_p): Cope with non-power-of-two alignments.
+ (operand_general_constraint_met_p): Handle the new MUL VL addresses.
+ (print_immediate_offset_address): Likewise.
+ (aarch64_print_operand): Likewise.
+ * aarch64-opc-2.c: Regenerate.
+ * aarch64-asm.h (ins_sve_addr_ri_s4xvl, ins_sve_addr_ri_s6xvl)
+ (ins_sve_addr_ri_s9xvl): New inserters.
+ * aarch64-asm.c (aarch64_ins_sve_addr_ri_s4xvl): New function.
+ (aarch64_ins_sve_addr_ri_s6xvl): Likewise.
+ (aarch64_ins_sve_addr_ri_s9xvl): Likewise.
+ * aarch64-asm-2.c: Regenerate.
+ * aarch64-dis.h (ext_sve_addr_ri_s4xvl, ext_sve_addr_ri_s6xvl)
+ (ext_sve_addr_ri_s9xvl): New extractors.
+ * aarch64-dis.c (aarch64_ext_sve_addr_reg_mul_vl): New function.
+ (aarch64_ext_sve_addr_ri_s4xvl): Likewise.
+ (aarch64_ext_sve_addr_ri_s6xvl): Likewise.
+ (aarch64_ext_sve_addr_ri_s9xvl): Likewise.
+ * aarch64-dis-2.c: Regenerate.
+
+2016-09-21 Richard Sandiford <richard.sandiford@arm.com>
+
* aarch64-tbl.h (AARCH64_OPERANDS): Add entries for the new SVE
address operands.
* aarch64-opc.h (FLD_SVE_imm6, FLD_SVE_msz, FLD_SVE_xs_14)
diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c
index 47a414c..da590ca 100644
--- a/opcodes/aarch64-asm-2.c
+++ b/opcodes/aarch64-asm-2.c
@@ -480,12 +480,6 @@ aarch64_insert_operand (const aarch64_operand *self,
case 27:
case 35:
case 36:
- case 123:
- case 124:
- case 125:
- case 126:
- case 127:
- case 128:
case 129:
case 130:
case 131:
@@ -494,7 +488,13 @@ aarch64_insert_operand (const aarch64_operand *self,
case 134:
case 135:
case 136:
+ case 137:
+ case 138:
case 139:
+ case 140:
+ case 141:
+ case 142:
+ case 145:
return aarch64_ins_regno (self, info, code, inst);
case 12:
return aarch64_ins_reg_extended (self, info, code, inst);
@@ -531,8 +531,8 @@ aarch64_insert_operand (const aarch64_operand *self,
case 68:
case 69:
case 70:
- case 120:
- case 122:
+ case 126:
+ case 128:
return aarch64_ins_imm (self, info, code, inst);
case 38:
case 39:
@@ -587,46 +587,55 @@ aarch64_insert_operand (const aarch64_operand *self,
case 90:
case 91:
case 92:
- return aarch64_ins_sve_addr_ri_u6 (self, info, code, inst);
+ return aarch64_ins_sve_addr_ri_s4xvl (self, info, code, inst);
case 93:
+ return aarch64_ins_sve_addr_ri_s6xvl (self, info, code, inst);
case 94:
+ return aarch64_ins_sve_addr_ri_s9xvl (self, info, code, inst);
case 95:
case 96:
case 97:
case 98:
+ return aarch64_ins_sve_addr_ri_u6 (self, info, code, inst);
case 99:
case 100:
case 101:
case 102:
case 103:
case 104:
- return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst);
case 105:
case 106:
case 107:
case 108:
case 109:
case 110:
+ return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst);
case 111:
case 112:
- return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst);
case 113:
case 114:
case 115:
case 116:
- return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst);
case 117:
- return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst);
case 118:
- return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst);
+ return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst);
case 119:
- return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst);
+ case 120:
case 121:
+ case 122:
+ return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst);
+ case 123:
+ return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst);
+ case 124:
+ return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst);
+ case 125:
+ return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst);
+ case 127:
return aarch64_ins_sve_scale (self, info, code, inst);
- case 137:
+ case 143:
return aarch64_ins_sve_index (self, info, code, inst);
- case 138:
- case 140:
+ case 144:
+ case 146:
return aarch64_ins_sve_reglist (self, info, code, inst);
default: assert (0); abort ();
}
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index 0d3b2c7..944a9eb 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -745,6 +745,56 @@ aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
return NULL;
}
+/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
+ where <simm4> is a 4-bit signed value and where <factor> is 1 plus
+ SELF's operand-dependent value. fields[0] specifies the field that
+ holds <base>. <simm4> is encoded in the SVE_imm4 field. */
+const char *
+aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int factor = 1 + get_operand_specific_data (self);
+ insert_field (self->fields[0], code, info->addr.base_regno, 0);
+ insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
+ return NULL;
+}
+
+/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
+ where <simm6> is a 6-bit signed value and where <factor> is 1 plus
+ SELF's operand-dependent value. fields[0] specifies the field that
+ holds <base>. <simm6> is encoded in the SVE_imm6 field. */
+const char *
+aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int factor = 1 + get_operand_specific_data (self);
+ insert_field (self->fields[0], code, info->addr.base_regno, 0);
+ insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
+ return NULL;
+}
+
+/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
+ where <simm9> is a 9-bit signed value and where <factor> is 1 plus
+ SELF's operand-dependent value. fields[0] specifies the field that
+ holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
+ and imm3 fields, with imm3 being the less-significant part. */
+const char *
+aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int factor = 1 + get_operand_specific_data (self);
+ insert_field (self->fields[0], code, info->addr.base_regno, 0);
+ insert_fields (code, info->addr.offset.imm / factor, 0,
+ 2, FLD_imm3, FLD_SVE_imm6);
+ return NULL;
+}
+
/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
value. fields[0] specifies the base register field. */
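As a standalone illustration (not part of the patch), the sketch below mimics what the new inserters do with the MUL VL immediate: the operand-specific data holds factor - 1, and the operand's offset is divided by that factor before being placed in the 4-bit field.  The helper name encode_simm4_mul_vl is hypothetical.

/* Illustrative sketch only: scaling and encoding a signed 4-bit
   MUL VL offset, in the spirit of aarch64_ins_sve_addr_ri_s4xvl.  */
#include <assert.h>
#include <stdint.h>

static uint32_t encode_simm4_mul_vl (int od, int64_t imm)
{
  int factor = 1 + od;              /* od = 2 for SVE_ADDR_RI_S4x3xVL */
  assert (imm % factor == 0);       /* checked earlier by value_aligned_p */
  int64_t scaled = imm / factor;    /* the value that goes in SVE_imm4 */
  assert (scaled >= -8 && scaled <= 7);
  return (uint32_t) (scaled & 0xf); /* two's-complement 4-bit encoding */
}

int main (void)
{
  /* For an LD3 form (factor 3), an offset of -24 encodes as -8 = 0b1000.  */
  return encode_simm4_mul_vl (2, -24) == 0x8 ? 0 : 1;
}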
diff --git a/opcodes/aarch64-asm.h b/opcodes/aarch64-asm.h
index b81cfa1..5e13de0 100644
--- a/opcodes/aarch64-asm.h
+++ b/opcodes/aarch64-asm.h
@@ -69,6 +69,9 @@ AARCH64_DECL_OPD_INSERTER (ins_hint);
AARCH64_DECL_OPD_INSERTER (ins_prfop);
AARCH64_DECL_OPD_INSERTER (ins_reg_extended);
AARCH64_DECL_OPD_INSERTER (ins_reg_shifted);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_s4xvl);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_s6xvl);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_s9xvl);
AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_u6);
AARCH64_DECL_OPD_INSERTER (ins_sve_addr_rr_lsl);
AARCH64_DECL_OPD_INSERTER (ins_sve_addr_rz_xtw);
diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c
index 3dd714f..48d6ce7 100644
--- a/opcodes/aarch64-dis-2.c
+++ b/opcodes/aarch64-dis-2.c
@@ -10426,12 +10426,6 @@ aarch64_extract_operand (const aarch64_operand *self,
case 27:
case 35:
case 36:
- case 123:
- case 124:
- case 125:
- case 126:
- case 127:
- case 128:
case 129:
case 130:
case 131:
@@ -10440,7 +10434,13 @@ aarch64_extract_operand (const aarch64_operand *self,
case 134:
case 135:
case 136:
+ case 137:
+ case 138:
case 139:
+ case 140:
+ case 141:
+ case 142:
+ case 145:
return aarch64_ext_regno (self, info, code, inst);
case 8:
return aarch64_ext_regrt_sysins (self, info, code, inst);
@@ -10482,8 +10482,8 @@ aarch64_extract_operand (const aarch64_operand *self,
case 68:
case 69:
case 70:
- case 120:
- case 122:
+ case 126:
+ case 128:
return aarch64_ext_imm (self, info, code, inst);
case 38:
case 39:
@@ -10540,46 +10540,55 @@ aarch64_extract_operand (const aarch64_operand *self,
case 90:
case 91:
case 92:
- return aarch64_ext_sve_addr_ri_u6 (self, info, code, inst);
+ return aarch64_ext_sve_addr_ri_s4xvl (self, info, code, inst);
case 93:
+ return aarch64_ext_sve_addr_ri_s6xvl (self, info, code, inst);
case 94:
+ return aarch64_ext_sve_addr_ri_s9xvl (self, info, code, inst);
case 95:
case 96:
case 97:
case 98:
+ return aarch64_ext_sve_addr_ri_u6 (self, info, code, inst);
case 99:
case 100:
case 101:
case 102:
case 103:
case 104:
- return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst);
case 105:
case 106:
case 107:
case 108:
case 109:
case 110:
+ return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst);
case 111:
case 112:
- return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst);
case 113:
case 114:
case 115:
case 116:
- return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst);
case 117:
- return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst);
case 118:
- return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst);
+ return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst);
case 119:
- return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst);
+ case 120:
case 121:
+ case 122:
+ return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst);
+ case 123:
+ return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst);
+ case 124:
+ return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst);
+ case 125:
+ return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst);
+ case 127:
return aarch64_ext_sve_scale (self, info, code, inst);
- case 137:
+ case 143:
return aarch64_ext_sve_index (self, info, code, inst);
- case 138:
- case 140:
+ case 144:
+ case 146:
return aarch64_ext_sve_reglist (self, info, code, inst);
default: assert (0); abort ();
}
diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c
index ed77b4d..ba6befd 100644
--- a/opcodes/aarch64-dis.c
+++ b/opcodes/aarch64-dis.c
@@ -1186,6 +1186,78 @@ aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
return 1;
}
+/* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
+ where <offset> is given by the OFFSET parameter and where <factor> is
+ 1 plus SELF's operand-dependent value. fields[0] specifies the field
+ that holds <base>. */
+static int
+aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ int64_t offset)
+{
+ info->addr.base_regno = extract_field (self->fields[0], code, 0);
+ info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
+ info->addr.offset.is_reg = FALSE;
+ info->addr.writeback = FALSE;
+ info->addr.preind = TRUE;
+ if (offset != 0)
+ info->shifter.kind = AARCH64_MOD_MUL_VL;
+ info->shifter.amount = 1;
+ info->shifter.operator_present = (info->addr.offset.imm != 0);
+ info->shifter.amount_present = FALSE;
+ return 1;
+}
+
+/* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
+ where <simm4> is a 4-bit signed value and where <factor> is 1 plus
+ SELF's operand-dependent value. fields[0] specifies the field that
+ holds <base>. <simm4> is encoded in the SVE_imm4 field. */
+int
+aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int offset;
+
+ offset = extract_field (FLD_SVE_imm4, code, 0);
+ offset = ((offset + 8) & 15) - 8;
+ return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
+}
+
+/* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
+ where <simm6> is a 6-bit signed value and where <factor> is 1 plus
+ SELF's operand-dependent value. fields[0] specifies the field that
+ holds <base>. <simm6> is encoded in the SVE_imm6 field. */
+int
+aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int offset;
+
+ offset = extract_field (FLD_SVE_imm6, code, 0);
+ offset = (((offset + 32) & 63) - 32);
+ return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
+}
+
+/* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
+ where <simm9> is a 9-bit signed value and where <factor> is 1 plus
+ SELF's operand-dependent value. fields[0] specifies the field that
+ holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
+ and imm3 fields, with imm3 being the less-significant part. */
+int
+aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
+ aarch64_opnd_info *info,
+ aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int offset;
+
+ offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
+ offset = (((offset + 256) & 511) - 256);
+ return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
+}
+
/* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
is given by the OFFSET parameter and where <shift> is SELF's operand-
dependent value. fields[0] specifies the base register field <base>. */
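As a standalone illustration (not part of the patch), the extractors above recover a signed value from an unsigned field with the ((raw + 2^(n-1)) & (2^n - 1)) - 2^(n-1) idiom, then multiply by the factor to get the printed offset.  The sketch below works one case through; sign_extend is a hypothetical helper.

/* Illustrative sketch only: decoding a raw SVE_imm4 field the way
   aarch64_ext_sve_addr_ri_s4xvl does, for an LD3 (factor 3) form.  */
#include <stdio.h>

static int sign_extend (unsigned raw, int width)
{
  int half = 1 << (width - 1);
  return (int) (((raw + half) & ((1u << width) - 1)) - half);
}

int main (void)
{
  /* Raw field 0b1000 decodes to -8; scaled by the factor it prints
     as "#-24, mul vl".  The 6-bit and 9-bit forms use the same idiom
     with half = 32 and half = 256 respectively.  */
  int simm4 = sign_extend (0x8, 4);
  printf ("[x0,#%d,mul vl]\n", simm4 * 3);
  return 0;
}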
diff --git a/opcodes/aarch64-dis.h b/opcodes/aarch64-dis.h
index 0ce2d89..5619877 100644
--- a/opcodes/aarch64-dis.h
+++ b/opcodes/aarch64-dis.h
@@ -91,6 +91,9 @@ AARCH64_DECL_OPD_EXTRACTOR (ext_hint);
AARCH64_DECL_OPD_EXTRACTOR (ext_prfop);
AARCH64_DECL_OPD_EXTRACTOR (ext_reg_extended);
AARCH64_DECL_OPD_EXTRACTOR (ext_reg_shifted);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_s4xvl);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_s6xvl);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_s9xvl);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_u6);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_rr_lsl);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_rz_xtw);
diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c
index ed2b70b..a72f577 100644
--- a/opcodes/aarch64-opc-2.c
+++ b/opcodes/aarch64-opc-2.c
@@ -113,6 +113,12 @@ const struct aarch64_operand aarch64_operands[] =
{AARCH64_OPND_CLASS_SYSTEM, "BARRIER_ISB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "the ISB option name SY or an optional 4-bit unsigned immediate"},
{AARCH64_OPND_CLASS_SYSTEM, "PRFOP", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a prefetch operation specifier"},
{AARCH64_OPND_CLASS_SYSTEM, "BARRIER_PSB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "the PSB option name CSYNC"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by VL"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x2xVL", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 2*VL"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x3xVL", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 3*VL"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x4xVL", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 4*VL"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S6xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit signed offset, multiplied by VL"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S9xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 9-bit signed offset, multiplied by VL"},
{AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset"},
{AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x2", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 2"},
{AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x4", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 4"},
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 6617e28..d0959b5 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -365,6 +365,7 @@ const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
{"sxtw", 0x6},
{"sxtx", 0x7},
{"mul", 0x0},
+ {"mul vl", 0x0},
{NULL, 0},
};
@@ -486,10 +487,11 @@ value_in_range_p (int64_t value, int low, int high)
return (value >= low && value <= high) ? 1 : 0;
}
+/* Return true if VALUE is a multiple of ALIGN. */
static inline int
value_aligned_p (int64_t value, int align)
{
- return ((value & (align - 1)) == 0) ? 1 : 0;
+ return (value % align) == 0;
}
/* A signed value fits in a field. */
@@ -1666,6 +1668,49 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
}
break;
+ case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
+ min_value = -8;
+ max_value = 7;
+ sve_imm_offset_vl:
+ assert (!opnd->addr.offset.is_reg);
+ assert (opnd->addr.preind);
+ num = 1 + get_operand_specific_data (&aarch64_operands[type]);
+ min_value *= num;
+ max_value *= num;
+ if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
+ || (opnd->shifter.operator_present
+ && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
+ {
+ set_other_error (mismatch_detail, idx,
+ _("invalid addressing mode"));
+ return 0;
+ }
+ if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
+ {
+ set_offset_out_of_range_error (mismatch_detail, idx,
+ min_value, max_value);
+ return 0;
+ }
+ if (!value_aligned_p (opnd->addr.offset.imm, num))
+ {
+ set_unaligned_error (mismatch_detail, idx, num);
+ return 0;
+ }
+ break;
+
+ case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
+ min_value = -32;
+ max_value = 31;
+ goto sve_imm_offset_vl;
+
+ case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
+ min_value = -256;
+ max_value = 255;
+ goto sve_imm_offset_vl;
+
case AARCH64_OPND_SVE_ADDR_RI_U6:
case AARCH64_OPND_SVE_ADDR_RI_U6x2:
case AARCH64_OPND_SVE_ADDR_RI_U6x4:
@@ -2645,7 +2690,13 @@ print_immediate_offset_address (char *buf, size_t size,
}
else
{
- if (opnd->addr.offset.imm)
+ if (opnd->shifter.operator_present)
+ {
+ assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
+ snprintf (buf, size, "[%s,#%d,mul vl]",
+ base, opnd->addr.offset.imm);
+ }
+ else if (opnd->addr.offset.imm)
snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm);
else
snprintf (buf, size, "[%s]", base);
@@ -3114,6 +3165,12 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
case AARCH64_OPND_ADDR_SIMM7:
case AARCH64_OPND_ADDR_SIMM9:
case AARCH64_OPND_ADDR_SIMM9_2:
+ case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
+ case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
case AARCH64_OPND_SVE_ADDR_RI_U6:
case AARCH64_OPND_SVE_ADDR_RI_U6x2:
case AARCH64_OPND_SVE_ADDR_RI_U6x4:
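As a standalone illustration (not part of the patch), the sketch below combines the range and alignment checks applied to a MUL VL offset, mirroring the sve_imm_offset_vl case added to operand_general_constraint_met_p; mul_vl_offset_ok is a hypothetical helper.

/* Illustrative sketch only: MIN_VALUE/MAX_VALUE are the raw simm
   bounds, FACTOR is 1 plus the operand-specific data.  */
#include <stdbool.h>
#include <stdio.h>

static bool mul_vl_offset_ok (long imm, int factor,
                              long min_value, long max_value)
{
  min_value *= factor;
  max_value *= factor;
  if (imm < min_value || imm > max_value)
    return false;               /* offset out of range */
  return imm % factor == 0;     /* must be a multiple of the factor */
}

int main (void)
{
  /* SVE_ADDR_RI_S4x3xVL: simm4 range [-8, 7], factor 3, so valid
     offsets are -24, -21, ..., 21.  */
  printf ("%d %d %d\n",
          mul_vl_offset_ok (21, 3, -8, 7),   /* 1 */
          mul_vl_offset_ok (22, 3, -8, 7),   /* 0: out of range */
          mul_vl_offset_ok (20, 3, -8, 7));  /* 0: not a multiple of 3 */
  return 0;
}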
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index aba4b2d..986cef6 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -2820,6 +2820,24 @@ struct aarch64_opcode aarch64_opcode_table[] =
"a prefetch operation specifier") \
Y(SYSTEM, hint, "BARRIER_PSB", 0, F (), \
"the PSB option name CSYNC") \
+ Y(ADDRESS, sve_addr_ri_s4xvl, "SVE_ADDR_RI_S4xVL", \
+ 0 << OPD_F_OD_LSB, F(FLD_Rn), \
+ "an address with a 4-bit signed offset, multiplied by VL") \
+ Y(ADDRESS, sve_addr_ri_s4xvl, "SVE_ADDR_RI_S4x2xVL", \
+ 1 << OPD_F_OD_LSB, F(FLD_Rn), \
+ "an address with a 4-bit signed offset, multiplied by 2*VL") \
+ Y(ADDRESS, sve_addr_ri_s4xvl, "SVE_ADDR_RI_S4x3xVL", \
+ 2 << OPD_F_OD_LSB, F(FLD_Rn), \
+ "an address with a 4-bit signed offset, multiplied by 3*VL") \
+ Y(ADDRESS, sve_addr_ri_s4xvl, "SVE_ADDR_RI_S4x4xVL", \
+ 3 << OPD_F_OD_LSB, F(FLD_Rn), \
+ "an address with a 4-bit signed offset, multiplied by 4*VL") \
+ Y(ADDRESS, sve_addr_ri_s6xvl, "SVE_ADDR_RI_S6xVL", \
+ 0 << OPD_F_OD_LSB, F(FLD_Rn), \
+ "an address with a 6-bit signed offset, multiplied by VL") \
+ Y(ADDRESS, sve_addr_ri_s9xvl, "SVE_ADDR_RI_S9xVL", \
+ 0 << OPD_F_OD_LSB, F(FLD_Rn), \
+ "an address with a 9-bit signed offset, multiplied by VL") \
Y(ADDRESS, sve_addr_ri_u6, "SVE_ADDR_RI_U6", 0 << OPD_F_OD_LSB, \
F(FLD_Rn), "an address with a 6-bit unsigned offset") \
Y(ADDRESS, sve_addr_ri_u6, "SVE_ADDR_RI_U6x2", 1 << OPD_F_OD_LSB, \