author     Richard Sandiford <richard.sandiford@arm.com>  2016-09-21 16:56:15 +0100
committer  Richard Sandiford <richard.sandiford@arm.com>  2016-09-21 16:56:15 +0100
commit     98907a704908c5877d929c57b2ddb2e5f899d9a9 (patch)
tree       66e651a02be2e7d48ebf44cb7f1a2865766461d9
parent     4df068de5214ff55b01ae320ec580f2928eb74e5 (diff)
[AArch64][SVE 26/32] Add SVE MUL VL addressing modes
This patch adds support for addresses of the form:
[<base>, #<offset>, MUL VL]
This involves adding a new AARCH64_MOD_MUL_VL modifier, which is
why I split it out from the other addressing modes.
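As a concrete illustration of the new operands: the encoded field holds the offset divided by the operand's factor (1 plus the operand-specific data), and decoding sign-extends the field before scaling it back up, as in the inserters and extractors further down. Below is a minimal standalone C sketch of the 4-bit case; the field position (bits 16-19) and the helper name are assumptions made for the example, not the binutils definitions.

/* Illustrative only: decode a 4-bit signed "MUL VL" offset field and
   scale it by the operand's factor, in the style of the extractor this
   patch adds (aarch64_ext_sve_addr_ri_s4xvl).  The field position
   (bits 16-19) is assumed here for the sake of the example.  */
#include <stdint.h>
#include <stdio.h>

static int64_t
decode_s4xvl_offset (uint32_t insn, int factor)
{
  int64_t offset = (insn >> 16) & 0xf;   /* raw 4-bit field */
  offset = ((offset + 8) & 15) - 8;      /* sign-extend to [-8, 7] */
  return offset * factor;                /* factor is 3 for LD3, etc.  */
}

int
main (void)
{
  /* A raw field value of 0xd sign-extends to -3; with a factor of 3
     this describes the address [<base>, #-9, MUL VL].  */
  printf ("%lld\n", (long long) decode_s4xvl_offset (0xdu << 16, 3));
  return 0;
}
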
For LD2, LD3 and LD4, the offset must be a multiple of the structure
size, so for LD3 the possible values are 0, 3, 6, .... The patch
therefore extends value_aligned_p to handle non-power-of-2 alignments.
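A minimal sketch of that value_aligned_p change in isolation (plain C, not the opcodes library itself): with an alignment of 3, the old power-of-two mask test gives the wrong answer, while the modulo form accepts exactly the multiples 0, 3, 6, ...

/* Illustrative only: the difference between the old power-of-two test
   and the new modulo test in value_aligned_p.  */
#include <stdint.h>
#include <stdio.h>

static int
aligned_pow2 (int64_t value, int align)   /* old behaviour */
{
  return (value & (align - 1)) == 0;
}

static int
aligned_any (int64_t value, int align)    /* new behaviour */
{
  return (value % align) == 0;
}

int
main (void)
{
  /* An LD3 offset needs align == 3: the mask test wrongly rejects 6
     (6 & 2 == 2), while the modulo test accepts 0, 3, 6, ...  */
  printf ("pow2: %d, modulo: %d\n", aligned_pow2 (6, 3), aligned_any (6, 3));
  return 0;
}
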
include/
* opcode/aarch64.h (AARCH64_OPND_SVE_ADDR_RI_S4xVL): New aarch64_opnd.
(AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, AARCH64_OPND_SVE_ADDR_RI_S4x3xVL)
(AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, AARCH64_OPND_SVE_ADDR_RI_S6xVL)
(AARCH64_OPND_SVE_ADDR_RI_S9xVL): Likewise.
(AARCH64_MOD_MUL_VL): New aarch64_modifier_kind.
opcodes/
* aarch64-tbl.h (AARCH64_OPERANDS): Add entries for new MUL VL
operands.
* aarch64-opc.c (aarch64_operand_modifiers): Initialize
the AARCH64_MOD_MUL_VL entry.
(value_aligned_p): Cope with non-power-of-two alignments.
(operand_general_constraint_met_p): Handle the new MUL VL addresses.
(print_immediate_offset_address): Likewise.
(aarch64_print_operand): Likewise.
* aarch64-opc-2.c: Regenerate.
* aarch64-asm.h (ins_sve_addr_ri_s4xvl, ins_sve_addr_ri_s6xvl)
(ins_sve_addr_ri_s9xvl): New inserters.
* aarch64-asm.c (aarch64_ins_sve_addr_ri_s4xvl): New function.
(aarch64_ins_sve_addr_ri_s6xvl): Likewise.
(aarch64_ins_sve_addr_ri_s9xvl): Likewise.
* aarch64-asm-2.c: Regenerate.
* aarch64-dis.h (ext_sve_addr_ri_s4xvl, ext_sve_addr_ri_s6xvl)
(ext_sve_addr_ri_s9xvl): New extractors.
* aarch64-dis.c (aarch64_ext_sve_addr_reg_mul_vl): New function.
(aarch64_ext_sve_addr_ri_s4xvl): Likewise.
(aarch64_ext_sve_addr_ri_s6xvl): Likewise.
(aarch64_ext_sve_addr_ri_s9xvl): Likewise.
* aarch64-dis-2.c: Regenerate.
gas/
* config/tc-aarch64.c (SHIFTED_NONE, SHIFTED_MUL_VL): New
parse_shift_modes.
(parse_shift): Handle SHIFTED_MUL_VL.
(parse_address_main): Add an imm_shift_mode parameter.
(parse_address, parse_sve_address): Update accordingly.
(parse_operands): Handle MUL VL addressing modes.
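Because "MUL VL" is two tokens, the parse_shift change listed above recognises a trailing "VL" once "MUL" has been matched. The sketch below shows that check in isolation; the function name and scaffolding are invented for the example, and only the strncasecmp/ISALPHA test mirrors the patch.

/* Illustrative only: the two-token "MUL VL" check that parse_shift
   gains.  Names and scaffolding here are made up for the sketch; the
   strncasecmp/isalpha test is the part that mirrors the patch.  */
#include <ctype.h>
#include <stdio.h>
#include <strings.h>

static int
parse_mul_vl_suffix (const char **str)
{
  const char *p = *str;
  while (isspace ((unsigned char) *p))
    p++;                                  /* skip_whitespace */
  if (strncasecmp (p, "vl", 2) == 0 && !isalpha ((unsigned char) p[2]))
    {
      *str = p + 2;                       /* accept AARCH64_MOD_MUL_VL */
      return 1;
    }
  return 0;                               /* "only 'MUL VL' is permitted" */
}

int
main (void)
{
  const char *ok = " vl]";
  const char *bad = " vlx]";
  printf ("%d %d\n", parse_mul_vl_suffix (&ok), parse_mul_vl_suffix (&bad));
  return 0;
}
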
-rw-r--r--  gas/ChangeLog             9
-rw-r--r--  gas/config/tc-aarch64.c  74
-rw-r--r--  include/ChangeLog         8
-rw-r--r--  include/opcode/aarch64.h  7
-rw-r--r--  opcodes/ChangeLog        25
-rw-r--r--  opcodes/aarch64-asm-2.c  45
-rw-r--r--  opcodes/aarch64-asm.c    50
-rw-r--r--  opcodes/aarch64-asm.h     3
-rw-r--r--  opcodes/aarch64-dis-2.c  45
-rw-r--r--  opcodes/aarch64-dis.c    72
-rw-r--r--  opcodes/aarch64-dis.h     3
-rw-r--r--  opcodes/aarch64-opc-2.c   6
-rw-r--r--  opcodes/aarch64-opc.c    61
-rw-r--r--  opcodes/aarch64-tbl.h    18
14 files changed, 373 insertions, 53 deletions
diff --git a/gas/ChangeLog b/gas/ChangeLog index c1f4152..4593a78 100644 --- a/gas/ChangeLog +++ b/gas/ChangeLog @@ -1,5 +1,14 @@ 2016-09-21 Richard Sandiford <richard.sandiford@arm.com> + * config/tc-aarch64.c (SHIFTED_NONE, SHIFTED_MUL_VL): New + parse_shift_modes. + (parse_shift): Handle SHIFTED_MUL_VL. + (parse_address_main): Add an imm_shift_mode parameter. + (parse_address, parse_sve_address): Update accordingly. + (parse_operands): Handle MUL VL addressing modes. + +2016-09-21 Richard Sandiford <richard.sandiford@arm.com> + * config/tc-aarch64.c (REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET): New register types. (get_reg_expected_msg): Handle them. diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c index e59333f..930b07a 100644 --- a/gas/config/tc-aarch64.c +++ b/gas/config/tc-aarch64.c @@ -2922,6 +2922,7 @@ find_reloc_table_entry (char **str) /* Mode argument to parse_shift and parser_shifter_operand. */ enum parse_shift_mode { + SHIFTED_NONE, /* no shifter allowed */ SHIFTED_ARITH_IMM, /* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or "#imm{,lsl #n}" */ SHIFTED_LOGIC_IMM, /* "rn{,lsl|lsr|asl|asr|ror #n}" or @@ -2929,6 +2930,7 @@ enum parse_shift_mode SHIFTED_LSL, /* bare "lsl #n" */ SHIFTED_MUL, /* bare "mul #n" */ SHIFTED_LSL_MSL, /* "lsl|msl #n" */ + SHIFTED_MUL_VL, /* "mul vl" */ SHIFTED_REG_OFFSET /* [su]xtw|sxtx {#n} or lsl #n */ }; @@ -2970,7 +2972,8 @@ parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode) } if (kind == AARCH64_MOD_MUL - && mode != SHIFTED_MUL) + && mode != SHIFTED_MUL + && mode != SHIFTED_MUL_VL) { set_syntax_error (_("invalid use of 'MUL'")); return FALSE; @@ -3010,6 +3013,22 @@ parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode) } break; + case SHIFTED_MUL_VL: + /* "MUL VL" consists of two separate tokens. Require the first + token to be "MUL" and look for a following "VL". */ + if (kind == AARCH64_MOD_MUL) + { + skip_whitespace (p); + if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2])) + { + p += 2; + kind = AARCH64_MOD_MUL_VL; + break; + } + } + set_syntax_error (_("only 'MUL VL' is permitted")); + return FALSE; + case SHIFTED_REG_OFFSET: if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX) @@ -3037,7 +3056,7 @@ parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode) /* Parse shift amount. */ exp_has_prefix = 0; - if (mode == SHIFTED_REG_OFFSET && *p == ']') + if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL) exp.X_op = O_absent; else { @@ -3048,7 +3067,11 @@ parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode) } my_get_expression (&exp, &p, GE_NO_PREFIX, 0); } - if (exp.X_op == O_absent) + if (kind == AARCH64_MOD_MUL_VL) + /* For consistency, give MUL VL the same shift amount as an implicit + MUL #1. */ + operand->shifter.amount = 1; + else if (exp.X_op == O_absent) { if (aarch64_extend_operator_p (kind) == FALSE || exp_has_prefix) { @@ -3268,6 +3291,7 @@ parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand, PC-relative (literal) label SVE: + [base,#imm,MUL VL] [base,Zm.D{,LSL #imm}] [base,Zm.S,(S|U)XTW {#imm}] [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements @@ -3307,15 +3331,20 @@ parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand, corresponding register. BASE_TYPE says which types of base register should be accepted and - OFFSET_TYPE says the same for offset registers. 
In all other respects, - it is the caller's responsibility to check for addressing modes not - supported by the instruction, and to set inst.reloc.type. */ + OFFSET_TYPE says the same for offset registers. IMM_SHIFT_MODE + is the type of shifter that is allowed for immediate offsets, + or SHIFTED_NONE if none. + + In all other respects, it is the caller's responsibility to check + for addressing modes not supported by the instruction, and to set + inst.reloc.type. */ static bfd_boolean parse_address_main (char **str, aarch64_opnd_info *operand, aarch64_opnd_qualifier_t *base_qualifier, aarch64_opnd_qualifier_t *offset_qualifier, - aarch64_reg_type base_type, aarch64_reg_type offset_type) + aarch64_reg_type base_type, aarch64_reg_type offset_type, + enum parse_shift_mode imm_shift_mode) { char *p = *str; const reg_entry *reg; @@ -3497,12 +3526,19 @@ parse_address_main (char **str, aarch64_opnd_info *operand, inst.reloc.type = entry->ldst_type; inst.reloc.pc_rel = entry->pc_rel; } - else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1)) + else { - set_syntax_error (_("invalid expression in the address")); - return FALSE; + if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1)) + { + set_syntax_error (_("invalid expression in the address")); + return FALSE; + } + /* [Xn,<expr> */ + if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p)) + /* [Xn,<expr>,<shifter> */ + if (! parse_shift (&p, operand, imm_shift_mode)) + return FALSE; } - /* [Xn,<expr> */ } } @@ -3582,10 +3618,10 @@ parse_address (char **str, aarch64_opnd_info *operand) { aarch64_opnd_qualifier_t base_qualifier, offset_qualifier; return parse_address_main (str, operand, &base_qualifier, &offset_qualifier, - REG_TYPE_R64_SP, REG_TYPE_R_Z); + REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE); } -/* Parse an address in which SVE vector registers are allowed. +/* Parse an address in which SVE vector registers and MUL VL are allowed. The arguments have the same meaning as for parse_address_main. Return TRUE on success. */ static bfd_boolean @@ -3594,7 +3630,8 @@ parse_sve_address (char **str, aarch64_opnd_info *operand, aarch64_opnd_qualifier_t *offset_qualifier) { return parse_address_main (str, operand, base_qualifier, offset_qualifier, - REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET); + REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET, + SHIFTED_MUL_VL); } /* Parse an operand for a MOVZ, MOVN or MOVK instruction. @@ -5938,11 +5975,18 @@ parse_operands (char *str, const aarch64_opcode *opcode) /* No qualifier. */ break; + case AARCH64_OPND_SVE_ADDR_RI_S4xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL: + case AARCH64_OPND_SVE_ADDR_RI_S6xVL: + case AARCH64_OPND_SVE_ADDR_RI_S9xVL: case AARCH64_OPND_SVE_ADDR_RI_U6: case AARCH64_OPND_SVE_ADDR_RI_U6x2: case AARCH64_OPND_SVE_ADDR_RI_U6x4: case AARCH64_OPND_SVE_ADDR_RI_U6x8: - /* [X<n>{, #imm}] + /* [X<n>{, #imm, MUL VL}] + [X<n>{, #imm}] but recognizing SVE registers. */ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier, &offset_qualifier)); diff --git a/include/ChangeLog b/include/ChangeLog index 71df381..f28903f 100644 --- a/include/ChangeLog +++ b/include/ChangeLog @@ -1,5 +1,13 @@ 2016-09-21 Richard Sandiford <richard.sandiford@arm.com> + * opcode/aarch64.h (AARCH64_OPND_SVE_ADDR_RI_S4xVL): New aarch64_opnd. + (AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, AARCH64_OPND_SVE_ADDR_RI_S4x3xVL) + (AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, AARCH64_OPND_SVE_ADDR_RI_S6xVL) + (AARCH64_OPND_SVE_ADDR_RI_S9xVL): Likewise. 
+ (AARCH64_MOD_MUL_VL): New aarch64_modifier_kind. + +2016-09-21 Richard Sandiford <richard.sandiford@arm.com> + * opcode/aarch64.h (AARCH64_OPND_SVE_ADDR_RI_U6): New aarch64_opnd. (AARCH64_OPND_SVE_ADDR_RI_U6x2, AARCH64_OPND_SVE_ADDR_RI_U6x4) (AARCH64_OPND_SVE_ADDR_RI_U6x8, AARCH64_OPND_SVE_ADDR_RR) diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h index e61ac9c..837d6bd 100644 --- a/include/opcode/aarch64.h +++ b/include/opcode/aarch64.h @@ -244,6 +244,12 @@ enum aarch64_opnd AARCH64_OPND_PRFOP, /* Prefetch operation. */ AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */ + AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */ + AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */ + AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */ + AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL]. */ + AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL]. */ + AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL]. */ AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */ AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */ AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */ @@ -786,6 +792,7 @@ enum aarch64_modifier_kind AARCH64_MOD_SXTW, AARCH64_MOD_SXTX, AARCH64_MOD_MUL, + AARCH64_MOD_MUL_VL, }; bfd_boolean diff --git a/opcodes/ChangeLog b/opcodes/ChangeLog index 2cedfb9..de052a9 100644 --- a/opcodes/ChangeLog +++ b/opcodes/ChangeLog @@ -1,5 +1,30 @@ 2016-09-21 Richard Sandiford <richard.sandiford@arm.com> + * aarch64-tbl.h (AARCH64_OPERANDS): Add entries for new MUL VL + operands. + * aarch64-opc.c (aarch64_operand_modifiers): Initialize + the AARCH64_MOD_MUL_VL entry. + (value_aligned_p): Cope with non-power-of-two alignments. + (operand_general_constraint_met_p): Handle the new MUL VL addresses. + (print_immediate_offset_address): Likewise. + (aarch64_print_operand): Likewise. + * aarch64-opc-2.c: Regenerate. + * aarch64-asm.h (ins_sve_addr_ri_s4xvl, ins_sve_addr_ri_s6xvl) + (ins_sve_addr_ri_s9xvl): New inserters. + * aarch64-asm.c (aarch64_ins_sve_addr_ri_s4xvl): New function. + (aarch64_ins_sve_addr_ri_s6xvl): Likewise. + (aarch64_ins_sve_addr_ri_s9xvl): Likewise. + * aarch64-asm-2.c: Regenerate. + * aarch64-dis.h (ext_sve_addr_ri_s4xvl, ext_sve_addr_ri_s6xvl) + (ext_sve_addr_ri_s9xvl): New extractors. + * aarch64-dis.c (aarch64_ext_sve_addr_reg_mul_vl): New function. + (aarch64_ext_sve_addr_ri_s4xvl): Likewise. + (aarch64_ext_sve_addr_ri_s6xvl): Likewise. + (aarch64_ext_sve_addr_ri_s9xvl): Likewise. + * aarch64-dis-2.c: Regenerate. + +2016-09-21 Richard Sandiford <richard.sandiford@arm.com> + * aarch64-tbl.h (AARCH64_OPERANDS): Add entries for the new SVE address operands. 
* aarch64-opc.h (FLD_SVE_imm6, FLD_SVE_msz, FLD_SVE_xs_14) diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c index 47a414c..da590ca 100644 --- a/opcodes/aarch64-asm-2.c +++ b/opcodes/aarch64-asm-2.c @@ -480,12 +480,6 @@ aarch64_insert_operand (const aarch64_operand *self, case 27: case 35: case 36: - case 123: - case 124: - case 125: - case 126: - case 127: - case 128: case 129: case 130: case 131: @@ -494,7 +488,13 @@ aarch64_insert_operand (const aarch64_operand *self, case 134: case 135: case 136: + case 137: + case 138: case 139: + case 140: + case 141: + case 142: + case 145: return aarch64_ins_regno (self, info, code, inst); case 12: return aarch64_ins_reg_extended (self, info, code, inst); @@ -531,8 +531,8 @@ aarch64_insert_operand (const aarch64_operand *self, case 68: case 69: case 70: - case 120: - case 122: + case 126: + case 128: return aarch64_ins_imm (self, info, code, inst); case 38: case 39: @@ -587,46 +587,55 @@ aarch64_insert_operand (const aarch64_operand *self, case 90: case 91: case 92: - return aarch64_ins_sve_addr_ri_u6 (self, info, code, inst); + return aarch64_ins_sve_addr_ri_s4xvl (self, info, code, inst); case 93: + return aarch64_ins_sve_addr_ri_s6xvl (self, info, code, inst); case 94: + return aarch64_ins_sve_addr_ri_s9xvl (self, info, code, inst); case 95: case 96: case 97: case 98: + return aarch64_ins_sve_addr_ri_u6 (self, info, code, inst); case 99: case 100: case 101: case 102: case 103: case 104: - return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst); case 105: case 106: case 107: case 108: case 109: case 110: + return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst); case 111: case 112: - return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst); case 113: case 114: case 115: case 116: - return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst); case 117: - return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst); case 118: - return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst); + return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst); case 119: - return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst); + case 120: case 121: + case 122: + return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst); + case 123: + return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst); + case 124: + return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst); + case 125: + return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst); + case 127: return aarch64_ins_sve_scale (self, info, code, inst); - case 137: + case 143: return aarch64_ins_sve_index (self, info, code, inst); - case 138: - case 140: + case 144: + case 146: return aarch64_ins_sve_reglist (self, info, code, inst); default: assert (0); abort (); } diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c index 0d3b2c7..944a9eb 100644 --- a/opcodes/aarch64-asm.c +++ b/opcodes/aarch64-asm.c @@ -745,6 +745,56 @@ aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED, return NULL; } +/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL], + where <simm4> is a 4-bit signed value and where <factor> is 1 plus + SELF's operand-dependent value. fields[0] specifies the field that + holds <base>. <simm4> is encoded in the SVE_imm4 field. 
*/ +const char * +aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self, + const aarch64_opnd_info *info, + aarch64_insn *code, + const aarch64_inst *inst ATTRIBUTE_UNUSED) +{ + int factor = 1 + get_operand_specific_data (self); + insert_field (self->fields[0], code, info->addr.base_regno, 0); + insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0); + return NULL; +} + +/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL], + where <simm6> is a 6-bit signed value and where <factor> is 1 plus + SELF's operand-dependent value. fields[0] specifies the field that + holds <base>. <simm6> is encoded in the SVE_imm6 field. */ +const char * +aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self, + const aarch64_opnd_info *info, + aarch64_insn *code, + const aarch64_inst *inst ATTRIBUTE_UNUSED) +{ + int factor = 1 + get_operand_specific_data (self); + insert_field (self->fields[0], code, info->addr.base_regno, 0); + insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0); + return NULL; +} + +/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL], + where <simm9> is a 9-bit signed value and where <factor> is 1 plus + SELF's operand-dependent value. fields[0] specifies the field that + holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6 + and imm3 fields, with imm3 being the less-significant part. */ +const char * +aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self, + const aarch64_opnd_info *info, + aarch64_insn *code, + const aarch64_inst *inst ATTRIBUTE_UNUSED) +{ + int factor = 1 + get_operand_specific_data (self); + insert_field (self->fields[0], code, info->addr.base_regno, 0); + insert_fields (code, info->addr.offset.imm / factor, 0, + 2, FLD_imm3, FLD_SVE_imm6); + return NULL; +} + /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6> is a 6-bit unsigned number and where <shift> is SELF's operand-dependent value. fields[0] specifies the base register field. 
*/ diff --git a/opcodes/aarch64-asm.h b/opcodes/aarch64-asm.h index b81cfa1..5e13de0 100644 --- a/opcodes/aarch64-asm.h +++ b/opcodes/aarch64-asm.h @@ -69,6 +69,9 @@ AARCH64_DECL_OPD_INSERTER (ins_hint); AARCH64_DECL_OPD_INSERTER (ins_prfop); AARCH64_DECL_OPD_INSERTER (ins_reg_extended); AARCH64_DECL_OPD_INSERTER (ins_reg_shifted); +AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_s4xvl); +AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_s6xvl); +AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_s9xvl); AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_u6); AARCH64_DECL_OPD_INSERTER (ins_sve_addr_rr_lsl); AARCH64_DECL_OPD_INSERTER (ins_sve_addr_rz_xtw); diff --git a/opcodes/aarch64-dis-2.c b/opcodes/aarch64-dis-2.c index 3dd714f..48d6ce7 100644 --- a/opcodes/aarch64-dis-2.c +++ b/opcodes/aarch64-dis-2.c @@ -10426,12 +10426,6 @@ aarch64_extract_operand (const aarch64_operand *self, case 27: case 35: case 36: - case 123: - case 124: - case 125: - case 126: - case 127: - case 128: case 129: case 130: case 131: @@ -10440,7 +10434,13 @@ aarch64_extract_operand (const aarch64_operand *self, case 134: case 135: case 136: + case 137: + case 138: case 139: + case 140: + case 141: + case 142: + case 145: return aarch64_ext_regno (self, info, code, inst); case 8: return aarch64_ext_regrt_sysins (self, info, code, inst); @@ -10482,8 +10482,8 @@ aarch64_extract_operand (const aarch64_operand *self, case 68: case 69: case 70: - case 120: - case 122: + case 126: + case 128: return aarch64_ext_imm (self, info, code, inst); case 38: case 39: @@ -10540,46 +10540,55 @@ aarch64_extract_operand (const aarch64_operand *self, case 90: case 91: case 92: - return aarch64_ext_sve_addr_ri_u6 (self, info, code, inst); + return aarch64_ext_sve_addr_ri_s4xvl (self, info, code, inst); case 93: + return aarch64_ext_sve_addr_ri_s6xvl (self, info, code, inst); case 94: + return aarch64_ext_sve_addr_ri_s9xvl (self, info, code, inst); case 95: case 96: case 97: case 98: + return aarch64_ext_sve_addr_ri_u6 (self, info, code, inst); case 99: case 100: case 101: case 102: case 103: case 104: - return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst); case 105: case 106: case 107: case 108: case 109: case 110: + return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst); case 111: case 112: - return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst); case 113: case 114: case 115: case 116: - return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst); case 117: - return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst); case 118: - return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst); + return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst); case 119: - return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst); + case 120: case 121: + case 122: + return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst); + case 123: + return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst); + case 124: + return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst); + case 125: + return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst); + case 127: return aarch64_ext_sve_scale (self, info, code, inst); - case 137: + case 143: return aarch64_ext_sve_index (self, info, code, inst); - case 138: - case 140: + case 144: + case 146: return aarch64_ext_sve_reglist (self, info, code, inst); default: assert (0); abort (); } diff --git a/opcodes/aarch64-dis.c b/opcodes/aarch64-dis.c index ed77b4d..ba6befd 100644 --- a/opcodes/aarch64-dis.c +++ b/opcodes/aarch64-dis.c @@ -1186,6 +1186,78 @@ aarch64_ext_reg_shifted (const aarch64_operand *self 
ATTRIBUTE_UNUSED, return 1; } +/* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL], + where <offset> is given by the OFFSET parameter and where <factor> is + 1 plus SELF's operand-dependent value. fields[0] specifies the field + that holds <base>. */ +static int +aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self, + aarch64_opnd_info *info, aarch64_insn code, + int64_t offset) +{ + info->addr.base_regno = extract_field (self->fields[0], code, 0); + info->addr.offset.imm = offset * (1 + get_operand_specific_data (self)); + info->addr.offset.is_reg = FALSE; + info->addr.writeback = FALSE; + info->addr.preind = TRUE; + if (offset != 0) + info->shifter.kind = AARCH64_MOD_MUL_VL; + info->shifter.amount = 1; + info->shifter.operator_present = (info->addr.offset.imm != 0); + info->shifter.amount_present = FALSE; + return 1; +} + +/* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL], + where <simm4> is a 4-bit signed value and where <factor> is 1 plus + SELF's operand-dependent value. fields[0] specifies the field that + holds <base>. <simm4> is encoded in the SVE_imm4 field. */ +int +aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self, + aarch64_opnd_info *info, aarch64_insn code, + const aarch64_inst *inst ATTRIBUTE_UNUSED) +{ + int offset; + + offset = extract_field (FLD_SVE_imm4, code, 0); + offset = ((offset + 8) & 15) - 8; + return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset); +} + +/* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL], + where <simm6> is a 6-bit signed value and where <factor> is 1 plus + SELF's operand-dependent value. fields[0] specifies the field that + holds <base>. <simm6> is encoded in the SVE_imm6 field. */ +int +aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self, + aarch64_opnd_info *info, aarch64_insn code, + const aarch64_inst *inst ATTRIBUTE_UNUSED) +{ + int offset; + + offset = extract_field (FLD_SVE_imm6, code, 0); + offset = (((offset + 32) & 63) - 32); + return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset); +} + +/* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL], + where <simm9> is a 9-bit signed value and where <factor> is 1 plus + SELF's operand-dependent value. fields[0] specifies the field that + holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6 + and imm3 fields, with imm3 being the less-significant part. */ +int +aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self, + aarch64_opnd_info *info, + aarch64_insn code, + const aarch64_inst *inst ATTRIBUTE_UNUSED) +{ + int offset; + + offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3); + offset = (((offset + 256) & 511) - 256); + return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset); +} + /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset> is given by the OFFSET parameter and where <shift> is SELF's operand- dependent value. fields[0] specifies the base register field <base>. 
*/ diff --git a/opcodes/aarch64-dis.h b/opcodes/aarch64-dis.h index 0ce2d89..5619877 100644 --- a/opcodes/aarch64-dis.h +++ b/opcodes/aarch64-dis.h @@ -91,6 +91,9 @@ AARCH64_DECL_OPD_EXTRACTOR (ext_hint); AARCH64_DECL_OPD_EXTRACTOR (ext_prfop); AARCH64_DECL_OPD_EXTRACTOR (ext_reg_extended); AARCH64_DECL_OPD_EXTRACTOR (ext_reg_shifted); +AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_s4xvl); +AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_s6xvl); +AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_s9xvl); AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_u6); AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_rr_lsl); AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_rz_xtw); diff --git a/opcodes/aarch64-opc-2.c b/opcodes/aarch64-opc-2.c index ed2b70b..a72f577 100644 --- a/opcodes/aarch64-opc-2.c +++ b/opcodes/aarch64-opc-2.c @@ -113,6 +113,12 @@ const struct aarch64_operand aarch64_operands[] = {AARCH64_OPND_CLASS_SYSTEM, "BARRIER_ISB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "the ISB option name SY or an optional 4-bit unsigned immediate"}, {AARCH64_OPND_CLASS_SYSTEM, "PRFOP", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a prefetch operation specifier"}, {AARCH64_OPND_CLASS_SYSTEM, "BARRIER_PSB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "the PSB option name CSYNC"}, + {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by VL"}, + {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x2xVL", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 2*VL"}, + {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x3xVL", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 3*VL"}, + {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S4x4xVL", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 4-bit signed offset, multiplied by 4*VL"}, + {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S6xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit signed offset, multiplied by VL"}, + {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_S9xVL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 9-bit signed offset, multiplied by VL"}, {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset"}, {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x2", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 2"}, {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x4", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 4"}, diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c index 6617e28..d0959b5 100644 --- a/opcodes/aarch64-opc.c +++ b/opcodes/aarch64-opc.c @@ -365,6 +365,7 @@ const struct aarch64_name_value_pair aarch64_operand_modifiers [] = {"sxtw", 0x6}, {"sxtx", 0x7}, {"mul", 0x0}, + {"mul vl", 0x0}, {NULL, 0}, }; @@ -486,10 +487,11 @@ value_in_range_p (int64_t value, int low, int high) return (value >= low && value <= high) ? 1 : 0; } +/* Return true if VALUE is a multiple of ALIGN. */ static inline int value_aligned_p (int64_t value, int align) { - return ((value & (align - 1)) == 0) ? 1 : 0; + return (value % align) == 0; } /* A signed value fits in a field. 
*/ @@ -1666,6 +1668,49 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx, } break; + case AARCH64_OPND_SVE_ADDR_RI_S4xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL: + min_value = -8; + max_value = 7; + sve_imm_offset_vl: + assert (!opnd->addr.offset.is_reg); + assert (opnd->addr.preind); + num = 1 + get_operand_specific_data (&aarch64_operands[type]); + min_value *= num; + max_value *= num; + if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present) + || (opnd->shifter.operator_present + && opnd->shifter.kind != AARCH64_MOD_MUL_VL)) + { + set_other_error (mismatch_detail, idx, + _("invalid addressing mode")); + return 0; + } + if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value)) + { + set_offset_out_of_range_error (mismatch_detail, idx, + min_value, max_value); + return 0; + } + if (!value_aligned_p (opnd->addr.offset.imm, num)) + { + set_unaligned_error (mismatch_detail, idx, num); + return 0; + } + break; + + case AARCH64_OPND_SVE_ADDR_RI_S6xVL: + min_value = -32; + max_value = 31; + goto sve_imm_offset_vl; + + case AARCH64_OPND_SVE_ADDR_RI_S9xVL: + min_value = -256; + max_value = 255; + goto sve_imm_offset_vl; + case AARCH64_OPND_SVE_ADDR_RI_U6: case AARCH64_OPND_SVE_ADDR_RI_U6x2: case AARCH64_OPND_SVE_ADDR_RI_U6x4: @@ -2645,7 +2690,13 @@ print_immediate_offset_address (char *buf, size_t size, } else { - if (opnd->addr.offset.imm) + if (opnd->shifter.operator_present) + { + assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL); + snprintf (buf, size, "[%s,#%d,mul vl]", + base, opnd->addr.offset.imm); + } + else if (opnd->addr.offset.imm) snprintf (buf, size, "[%s,#%d]", base, opnd->addr.offset.imm); else snprintf (buf, size, "[%s]", base); @@ -3114,6 +3165,12 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc, case AARCH64_OPND_ADDR_SIMM7: case AARCH64_OPND_ADDR_SIMM9: case AARCH64_OPND_ADDR_SIMM9_2: + case AARCH64_OPND_SVE_ADDR_RI_S4xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL: + case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL: + case AARCH64_OPND_SVE_ADDR_RI_S6xVL: + case AARCH64_OPND_SVE_ADDR_RI_S9xVL: case AARCH64_OPND_SVE_ADDR_RI_U6: case AARCH64_OPND_SVE_ADDR_RI_U6x2: case AARCH64_OPND_SVE_ADDR_RI_U6x4: diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h index aba4b2d..986cef6 100644 --- a/opcodes/aarch64-tbl.h +++ b/opcodes/aarch64-tbl.h @@ -2820,6 +2820,24 @@ struct aarch64_opcode aarch64_opcode_table[] = "a prefetch operation specifier") \ Y(SYSTEM, hint, "BARRIER_PSB", 0, F (), \ "the PSB option name CSYNC") \ + Y(ADDRESS, sve_addr_ri_s4xvl, "SVE_ADDR_RI_S4xVL", \ + 0 << OPD_F_OD_LSB, F(FLD_Rn), \ + "an address with a 4-bit signed offset, multiplied by VL") \ + Y(ADDRESS, sve_addr_ri_s4xvl, "SVE_ADDR_RI_S4x2xVL", \ + 1 << OPD_F_OD_LSB, F(FLD_Rn), \ + "an address with a 4-bit signed offset, multiplied by 2*VL") \ + Y(ADDRESS, sve_addr_ri_s4xvl, "SVE_ADDR_RI_S4x3xVL", \ + 2 << OPD_F_OD_LSB, F(FLD_Rn), \ + "an address with a 4-bit signed offset, multiplied by 3*VL") \ + Y(ADDRESS, sve_addr_ri_s4xvl, "SVE_ADDR_RI_S4x4xVL", \ + 3 << OPD_F_OD_LSB, F(FLD_Rn), \ + "an address with a 4-bit signed offset, multiplied by 4*VL") \ + Y(ADDRESS, sve_addr_ri_s6xvl, "SVE_ADDR_RI_S6xVL", \ + 0 << OPD_F_OD_LSB, F(FLD_Rn), \ + "an address with a 6-bit signed offset, multiplied by VL") \ + Y(ADDRESS, sve_addr_ri_s9xvl, "SVE_ADDR_RI_S9xVL", \ + 0 << OPD_F_OD_LSB, F(FLD_Rn), \ + "an address with a 9-bit signed offset, 
multiplied by VL") \ Y(ADDRESS, sve_addr_ri_u6, "SVE_ADDR_RI_U6", 0 << OPD_F_OD_LSB, \ F(FLD_Rn), "an address with a 6-bit unsigned offset") \ Y(ADDRESS, sve_addr_ri_u6, "SVE_ADDR_RI_U6x2", 1 << OPD_F_OD_LSB, \ |