author     Richard Sandiford <richard.sandiford@arm.com>	2017-02-24 18:29:00 +0000
committer  Richard Sandiford <richard.sandiford@arm.com>	2017-02-24 18:29:00 +0000
commit     582e12bf7602bb62ecc234402eb54044e83065e2 (patch)
tree       e188a618e62e3ecfcdbe0f4977372d7a664e63fa /opcodes/aarch64-opc.c
parent     f482d3044722558c3b16f54b33b0855bfbae36b1 (diff)
[AArch64] Additional SVE instructions
This patch adds support for some additions to the SVE architecture prior to
its public release.
include/
* opcode/aarch64.h (AARCH64_OPND_SVE_ADDR_RI_S4x16)
(AARCH64_OPND_SVE_IMM_ROT1, AARCH64_OPND_SVE_IMM_ROT2)
(AARCH64_OPND_SVE_Zm3_INDEX, AARCH64_OPND_SVE_Zm3_22_INDEX)
(AARCH64_OPND_SVE_Zm4_INDEX): New aarch64_opnds.
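
For orientation, here is a minimal sketch of the six operand codes this entry adds, with their meaning paraphrased from the constraint and printing hunks in aarch64-opc.c below.  The enum name and the ordering used here are illustrative only; the real enumerators live in enum aarch64_opnd in include/opcode/aarch64.h.

  /* Sketch: the new SVE operand codes.  Ordering and the enum name are
     assumptions; the semantics are taken from the aarch64-opc.c hunks
     in this commit.  */
  enum aarch64_opnd_additions
  {
    AARCH64_OPND_SVE_ADDR_RI_S4x16,  /* [<Xn|SP>{, #imm}], signed 4-bit
                                        immediate in [-8, 7], presumably
                                        scaled by 16 per the name.  */
    AARCH64_OPND_SVE_IMM_ROT1,       /* Rotation immediate: 90 or 270.  */
    AARCH64_OPND_SVE_IMM_ROT2,       /* Rotation immediate: 0, 90, 180, 270.  */
    AARCH64_OPND_SVE_Zm3_INDEX,      /* Indexed Zm, register z0-z7.  */
    AARCH64_OPND_SVE_Zm3_22_INDEX,   /* Indexed Zm, z0-z7, index high bit
                                        presumably in bit 22 (FLD_SVE_i3h).  */
    AARCH64_OPND_SVE_Zm4_INDEX       /* Indexed Zm, register z0-z15.  */
  };
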
opcodes/
* aarch64-tbl.h (OP_SVE_HMH, OP_SVE_VMU_HSD, OP_SVE_VMVU_HSD)
(OP_SVE_VMVV_HSD, OP_SVE_VMVVU_HSD, OP_SVE_VM_HSD, OP_SVE_VUVV_HSD)
(OP_SVE_VUV_HSD, OP_SVE_VU_HSD, OP_SVE_VVVU_H, OP_SVE_VVVU_S)
(OP_SVE_VVVU_HSD, OP_SVE_VVV_D, OP_SVE_VVV_D_H, OP_SVE_VVV_H)
(OP_SVE_VVV_HSD, OP_SVE_VVV_S, OP_SVE_VVV_S_B, OP_SVE_VVV_SD_BH)
(OP_SVE_VV_BHSDQ, OP_SVE_VV_HSD, OP_SVE_VZVV_HSD, OP_SVE_VZV_HSD)
(OP_SVE_V_HSD): New macros.
(OP_SVE_VMU_SD, OP_SVE_VMVU_SD, OP_SVE_VM_SD, OP_SVE_VUVV_SD)
(OP_SVE_VU_SD, OP_SVE_VVVU_SD, OP_SVE_VVV_SD, OP_SVE_VZVV_SD)
(OP_SVE_VZV_SD, OP_SVE_V_SD): Delete.
(aarch64_opcode_table): Add new SVE instructions.
(aarch64_opcode_table): Use imm_rotate{1,2} instead of imm_rotate
for rotation operands. Add new SVE operands.
* aarch64-asm.h (ins_sve_addr_ri_s4): New inserter.
(ins_sve_quad_index): Likewise.
(ins_imm_rotate): Split into...
(ins_imm_rotate1, ins_imm_rotate2): ...these two inserters.
* aarch64-asm.c (aarch64_ins_imm_rotate): Split into...
(aarch64_ins_imm_rotate1, aarch64_ins_imm_rotate2): ...these two
functions.
(aarch64_ins_sve_addr_ri_s4): New function.
(aarch64_ins_sve_quad_index): Likewise.
(do_misc_encoding): Handle "MOV Zn.Q, Qm".
* aarch64-asm-2.c: Regenerate.
* aarch64-dis.h (ext_sve_addr_ri_s4): New extractor.
(ext_sve_quad_index): Likewise.
(ext_imm_rotate): Split into...
(ext_imm_rotate1, ext_imm_rotate2): ...these two extractors.
* aarch64-dis.c (aarch64_ext_imm_rotate): Split into...
(aarch64_ext_imm_rotate1, aarch64_ext_imm_rotate2): ...these two
functions.
(aarch64_ext_sve_addr_ri_s4): New function.
(aarch64_ext_sve_quad_index): Likewise.
(aarch64_ext_sve_index): Allow quad indices.
(do_misc_decoding): Likewise.
* aarch64-dis-2.c: Regenerate.
* aarch64-opc.h (FLD_SVE_i3h, FLD_SVE_rot1, FLD_SVE_rot2): New
aarch64_field_kinds.
(OPD_F_OD_MASK): Widen by one bit.
(OPD_F_NO_ZR): Bump accordingly.
(get_operand_fields_width): New function.
* aarch64-opc.c (fields): Add new SVE fields.
(operand_general_constraint_met_p): Handle new SVE operands.
(aarch64_print_operand): Likewise.
* aarch64-opc-2.c: Regenerate.
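
The imm_rotate split recorded above corresponds to two field layouts visible in the aarch64-opc.c hunk further down: FLD_SVE_rot1 is a single bit at position 16 whose operand accepts 90 or 270, and FLD_SVE_rot2 is a two-bit field at bits [11,10] whose operand accepts 0, 90, 180 or 270.  Below is a minimal sketch of the encode/decode mapping, assuming the obvious value/90-style packing; it is an illustration, not the actual bodies of aarch64_ins_imm_rotate1/2 or aarch64_ext_imm_rotate1/2.

  #include <assert.h>
  #include <stdint.h>

  /* Assumed packing for the 1-bit rotation field (FLD_SVE_rot1, bit 16);
     the operand only ever carries 90 or 270.  */
  uint32_t
  encode_rot1 (int64_t value)
  {
    assert (value == 90 || value == 270);
    return (uint32_t) ((value - 90) / 180);     /* 90 -> 0, 270 -> 1.  */
  }

  int64_t
  decode_rot1 (uint32_t field)
  {
    return 90 + 180 * (int64_t) (field & 1);    /* 0 -> 90, 1 -> 270.  */
  }

  /* Assumed packing for the 2-bit rotation field (FLD_SVE_rot2, bits
     [11,10]); the operand carries 0, 90, 180 or 270.  */
  uint32_t
  encode_rot2 (int64_t value)
  {
    assert (value >= 0 && value <= 270 && value % 90 == 0);
    return (uint32_t) (value / 90);             /* 0,90,180,270 -> 0,1,2,3.  */
  }

  int64_t
  decode_rot2 (uint32_t field)
  {
    return 90 * (int64_t) (field & 3);          /* 0,1,2,3 -> 0,90,180,270.  */
  }

The 90/270 and 0/90/180/270 value sets are exactly the ones checked in operand_general_constraint_met_p for AARCH64_OPND_SVE_IMM_ROT1 and AARCH64_OPND_SVE_IMM_ROT2 in the diff below.
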
gas/
* doc/c-aarch64.texi: Document that sve implies fp16, simd and compnum.
* config/tc-aarch64.c (parse_vector_type_for_operand): Allow .q
to be used with SVE registers.
(parse_operands): Handle new SVE operands.
(aarch64_features): Make "sve" require F16 rather than FP. Also
require COMPNUM.
* testsuite/gas/aarch64/sve.s: Add tests for new instructions.
Include compnum tests.
* testsuite/gas/aarch64/sve.d: Update accordingly.
* testsuite/gas/aarch64/sve-invalid.s: Add tests for new instructions.
* testsuite/gas/aarch64/sve-invalid.l: Update accordingly. Also
update expected output for new FMOV and MOV alternatives.
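
As an aid to reading the new tests: the only new addressing form is AARCH64_OPND_SVE_ADDR_RI_S4x16, whose encoded immediate is limited to [-8, 7] by the constraint hunk below; the x16 suffix suggests the assembler-level byte offset is that immediate scaled by 16.  Here is a small self-contained check of what that would accept, under that assumption (an illustration, not code from gas).

  #include <stdbool.h>
  #include <stdio.h>

  /* Byte offsets acceptable for an SVE_ADDR_RI_S4x16 operand, assuming a
     signed 4-bit immediate scaled by 16: multiples of 16 from -128 to 112.  */
  bool
  valid_s4x16_offset (long offset)
  {
    return offset % 16 == 0 && offset / 16 >= -8 && offset / 16 <= 7;
  }

  int
  main (void)
  {
    printf ("%d %d %d\n",
            valid_s4x16_offset (-128),  /* 1: equals -8 * 16.  */
            valid_s4x16_offset (112),   /* 1: equals 7 * 16.  */
            valid_s4x16_offset (120));  /* 0: not a multiple of 16.  */
    return 0;
  }
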
Diffstat (limited to 'opcodes/aarch64-opc.c')
 opcodes/aarch64-opc.c | 39
 1 file changed, 39 insertions, 0 deletions
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index dac6159..cc99c10 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -290,6 +290,7 @@ const aarch64_field fields[] =
     { 5, 5 },   /* SVE_Zn: SVE vector register, bits [9,5].  */
     { 0, 5 },   /* SVE_Zt: SVE vector register, bits [4,0].  */
     { 5, 1 },   /* SVE_i1: single-bit immediate.  */
+    { 22, 1 },  /* SVE_i3h: high bit of 3-bit immediate.  */
     { 16, 3 },  /* SVE_imm3: 3-bit immediate field.  */
     { 16, 4 },  /* SVE_imm4: 4-bit immediate field.  */
     { 5, 5 },   /* SVE_imm5: 5-bit immediate field.  */
@@ -303,6 +304,8 @@ const aarch64_field fields[] =
     { 10, 2 },  /* SVE_msz: 2-bit shift amount for ADR.  */
     { 5, 5 },   /* SVE_pattern: vector pattern enumeration.  */
     { 0, 4 },   /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
+    { 16, 1 },  /* SVE_rot1: 1-bit rotation amount.  */
+    { 10, 2 },  /* SVE_rot2: 2-bit rotation amount.  */
     { 22, 1 },  /* SVE_sz: 1-bit element size select.  */
     { 16, 4 },  /* SVE_tsz: triangular size select.  */
     { 22, 2 },  /* SVE_tszh: triangular size select high, bits [23,22].  */
@@ -1474,6 +1477,29 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
     case AARCH64_OPND_CLASS_SVE_REG:
       switch (type)
 	{
+	case AARCH64_OPND_SVE_Zm3_INDEX:
+	case AARCH64_OPND_SVE_Zm3_22_INDEX:
+	case AARCH64_OPND_SVE_Zm4_INDEX:
+	  size = get_operand_fields_width (get_operand_from_code (type));
+	  shift = get_operand_specific_data (&aarch64_operands[type]);
+	  mask = (1 << shift) - 1;
+	  if (opnd->reg.regno > mask)
+	    {
+	      assert (mask == 7 || mask == 15);
+	      set_other_error (mismatch_detail, idx,
+			       mask == 15
+			       ? _("z0-z15 expected")
+			       : _("z0-z7 expected"));
+	      return 0;
+	    }
+	  mask = (1 << (size - shift)) - 1;
+	  if (!value_in_range_p (opnd->reglane.index, 0, mask))
+	    {
+	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
+	      return 0;
+	    }
+	  break;
+
 	case AARCH64_OPND_SVE_Zn_INDEX:
 	  size = aarch64_get_qualifier_esize (opnd->qualifier);
 	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
@@ -1798,6 +1824,11 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 	    }
 	  break;
 
+	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
+	  min_value = -8;
+	  max_value = 7;
+	  goto sve_imm_offset;
+
 	case AARCH64_OPND_SVE_ADDR_RR:
 	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
 	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
@@ -2103,6 +2134,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 
 	case AARCH64_OPND_IMM_ROT1:
 	case AARCH64_OPND_IMM_ROT2:
+	case AARCH64_OPND_SVE_IMM_ROT2:
 	  if (opnd->imm.value != 0
 	      && opnd->imm.value != 90
 	      && opnd->imm.value != 180
@@ -2115,6 +2147,7 @@ operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
 	  break;
 
 	case AARCH64_OPND_IMM_ROT3:
+	case AARCH64_OPND_SVE_IMM_ROT1:
 	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
 	    {
 	      set_other_error (mismatch_detail, idx,
@@ -3174,6 +3207,9 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
 	  print_register_list (buf, size, opnd, "z");
 	  break;
 
+	case AARCH64_OPND_SVE_Zm3_INDEX:
+	case AARCH64_OPND_SVE_Zm3_22_INDEX:
+	case AARCH64_OPND_SVE_Zm4_INDEX:
 	case AARCH64_OPND_SVE_Zn_INDEX:
 	  snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
 		    aarch64_get_qualifier_name (opnd->qualifier),
@@ -3214,6 +3250,8 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
 	case AARCH64_OPND_IMM_ROT1:
 	case AARCH64_OPND_IMM_ROT2:
 	case AARCH64_OPND_IMM_ROT3:
+	case AARCH64_OPND_SVE_IMM_ROT1:
+	case AARCH64_OPND_SVE_IMM_ROT2:
 	  snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
 	  break;
 
@@ -3461,6 +3499,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
 	case AARCH64_OPND_ADDR_SIMM9:
 	case AARCH64_OPND_ADDR_SIMM9_2:
 	case AARCH64_OPND_ADDR_SIMM10:
+	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
 	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
 	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
 	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
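
Finally, a standalone restatement of the register/index check added to operand_general_constraint_met_p above, since it packs two limits into one computation: the operand-specific data gives the number of bits that encode the register number, and the remainder of the total field width bounds the element index.  The size/shift pairs passed below are example values chosen for illustration; the real ones come from get_operand_fields_width and get_operand_specific_data.

  #include <stdio.h>

  /* Mirror of the new check: SIZE is the combined width of the operand's
     fields, SHIFT the number of those bits used for the register number,
     so (1 << SHIFT) - 1 bounds the register and the remaining bits bound
     the element index.  */
  void
  check_indexed_zm (int size, int shift, int regno, int index)
  {
    int reg_mask = (1 << shift) - 1;
    int idx_mask = (1 << (size - shift)) - 1;

    if (regno > reg_mask)
      printf ("z%d rejected: z0-z%d expected\n", regno, reg_mask);
    else if (index < 0 || index > idx_mask)
      printf ("index %d rejected: must be in [0, %d]\n", index, idx_mask);
    else
      printf ("z%d[%d] accepted\n", regno, index);
  }

  int
  main (void)
  {
    check_indexed_zm (6, 3, 5, 7);    /* 3-bit Zm, 3-bit index: accepted.  */
    check_indexed_zm (6, 3, 9, 0);    /* register too high: z0-z7 expected.  */
    check_indexed_zm (5, 4, 12, 2);   /* 4-bit Zm, 1-bit index: index rejected.  */
    return 0;
  }
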