author     Richard Sandiford <richard.sandiford@arm.com>   2017-02-24 18:29:00 +0000
committer  Richard Sandiford <richard.sandiford@arm.com>   2017-02-24 18:29:00 +0000
commit     582e12bf7602bb62ecc234402eb54044e83065e2 (patch)
tree       e188a618e62e3ecfcdbe0f4977372d7a664e63fa /opcodes/aarch64-asm.c
parent     f482d3044722558c3b16f54b33b0855bfbae36b1 (diff)
[AArch64] Additional SVE instructions
This patch adds support for some additions that were made to the SVE
architecture prior to its public release.  (A short standalone sketch of
the new rotation-immediate encoding appears after the ChangeLog below.)
include/
* opcode/aarch64.h (AARCH64_OPND_SVE_ADDR_RI_S4x16)
(AARCH64_OPND_SVE_IMM_ROT1, AARCH64_OPND_SVE_IMM_ROT2)
(AARCH64_OPND_SVE_Zm3_INDEX, AARCH64_OPND_SVE_Zm3_22_INDEX)
(AARCH64_OPND_SVE_Zm4_INDEX): New aarch64_opnds.
opcodes/
* aarch64-tbl.h (OP_SVE_HMH, OP_SVE_VMU_HSD, OP_SVE_VMVU_HSD)
(OP_SVE_VMVV_HSD, OP_SVE_VMVVU_HSD, OP_SVE_VM_HSD, OP_SVE_VUVV_HSD)
(OP_SVE_VUV_HSD, OP_SVE_VU_HSD, OP_SVE_VVVU_H, OP_SVE_VVVU_S)
(OP_SVE_VVVU_HSD, OP_SVE_VVV_D, OP_SVE_VVV_D_H, OP_SVE_VVV_H)
(OP_SVE_VVV_HSD, OP_SVE_VVV_S, OP_SVE_VVV_S_B, OP_SVE_VVV_SD_BH)
(OP_SVE_VV_BHSDQ, OP_SVE_VV_HSD, OP_SVE_VZVV_HSD, OP_SVE_VZV_HSD)
(OP_SVE_V_HSD): New macros.
(OP_SVE_VMU_SD, OP_SVE_VMVU_SD, OP_SVE_VM_SD, OP_SVE_VUVV_SD)
(OP_SVE_VU_SD, OP_SVE_VVVU_SD, OP_SVE_VVV_SD, OP_SVE_VZVV_SD)
(OP_SVE_VZV_SD, OP_SVE_V_SD): Delete.
(aarch64_opcode_table): Add new SVE instructions.
(aarch64_opcode_table): Use imm_rotate{1,2} instead of imm_rotate
for rotation operands. Add new SVE operands.
* aarch64-asm.h (ins_sve_addr_ri_s4): New inserter.
(ins_sve_quad_index): Likewise.
(ins_imm_rotate): Split into...
(ins_imm_rotate1, ins_imm_rotate2): ...these two inserters.
* aarch64-asm.c (aarch64_ins_imm_rotate): Split into...
(aarch64_ins_imm_rotate1, aarch64_ins_imm_rotate2): ...these two
functions.
(aarch64_ins_sve_addr_ri_s4): New function.
(aarch64_ins_sve_quad_index): Likewise.
(do_misc_encoding): Handle "MOV Zn.Q, Qm".
* aarch64-asm-2.c: Regenerate.
* aarch64-dis.h (ext_sve_addr_ri_s4): New extractor.
(ext_sve_quad_index): Likewise.
(ext_imm_rotate): Split into...
(ext_imm_rotate1, ext_imm_rotate2): ...these two extractors.
* aarch64-dis.c (aarch64_ext_imm_rotate): Split into...
(aarch64_ext_imm_rotate1, aarch64_ext_imm_rotate2): ...these two
functions.
(aarch64_ext_sve_addr_ri_s4): New function.
(aarch64_ext_sve_quad_index): Likewise.
(aarch64_ext_sve_index): Allow quad indices.
(do_misc_decoding): Likewise.
* aarch64-dis-2.c: Regenerate.
* aarch64-opc.h (FLD_SVE_i3h, FLD_SVE_rot1, FLD_SVE_rot2): New
aarch64_field_kinds.
(OPD_F_OD_MASK): Widen by one bit.
(OPD_F_NO_ZR): Bump accordingly.
(get_operand_field_width): New function.
* aarch64-opc.c (fields): Add new SVE fields.
(operand_general_constraint_met_p): Handle new SVE operands.
(aarch64_print_operand): Likewise.
* aarch64-opc-2.c: Regenerate.
gas/
* doc/c-aarch64.texi: Document that sve implies fp16, simd and compnum.
* config/tc-aarch64.c (parse_vector_type_for_operand): Allow .q
to be used with SVE registers.
(parse_operands): Handle new SVE operands.
(aarch64_features): Make "sve" require F16 rather than FP. Also
require COMPNUM.
* testsuite/gas/aarch64/sve.s: Add tests for new instructions.
Include compnum tests.
* testsuite/gas/aarch64/sve.d: Update accordingly.
* testsuite/gas/aarch64/sve-invalid.s: Add tests for new instructions.
* testsuite/gas/aarch64/sve-invalid.l: Update accordingly. Also
update expected output for new FMOV and MOV alternatives.
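
For reference, here is a short standalone sketch of the rotation-immediate
arithmetic that the split inserters implement.  It only mirrors the logic
visible in the diff below; the helper names (encode_rot1, encode_rot2) and
the small driver are invented for illustration and are not part of
opcodes/aarch64-asm.c, whose real inserters take aarch64_operand and
aarch64_opnd_info arguments and write the result with insert_field.

#include <assert.h>
#include <stdint.h>

/* Illustrative helpers only: return the field value for a rotation
   immediate, as the real inserters compute it before calling
   insert_field.  */

/* 1-bit rotation field: #90 encodes as 0, #270 as 1.  */
static uint64_t
encode_rot1 (uint64_t value)
{
  uint64_t rot = (value - 90) / 180;
  assert (rot < 2U);
  return rot;
}

/* 2-bit rotation field: #0, #90, #180 and #270 encode as 0..3.  */
static uint64_t
encode_rot2 (uint64_t value)
{
  uint64_t rot = value / 90;
  assert (rot < 4U);
  return rot;
}

int
main (void)
{
  assert (encode_rot1 (270) == 1);
  assert (encode_rot2 (180) == 2);
  return 0;
}
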
Diffstat (limited to 'opcodes/aarch64-asm.c')
-rw-r--r--   opcodes/aarch64-asm.c | 78
1 file changed, 48 insertions(+), 30 deletions(-)
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index 47d56f9..516bdab 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -436,38 +436,27 @@ aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
   return NULL;
 }
 
-/* Insert field rot for the rotate immediate in
-   FCMLA <Vd>.<T>, <Vn>.<T>, <Vm>.<T>, #<rotate>.  */
+/* Insert 1-bit rotation immediate (#90 or #270).  */
 const char *
-aarch64_ins_imm_rotate (const aarch64_operand *self,
-                        const aarch64_opnd_info *info,
-                        aarch64_insn *code, const aarch64_inst *inst)
+aarch64_ins_imm_rotate1 (const aarch64_operand *self,
+                         const aarch64_opnd_info *info,
+                         aarch64_insn *code, const aarch64_inst *inst)
 {
-  uint64_t rot = info->imm.value / 90;
-
-  switch (info->type)
-    {
-    case AARCH64_OPND_IMM_ROT1:
-    case AARCH64_OPND_IMM_ROT2:
-      /* value  rot
-         0      0
-         90     1
-         180    2
-         270    3  */
-      assert (rot < 4U);
-      break;
-    case AARCH64_OPND_IMM_ROT3:
-      /* value  rot
-         90     0
-         270    1  */
-      rot = (rot - 1) / 2;
-      assert (rot < 2U);
-      break;
-    default:
-      assert (0);
-    }
+  uint64_t rot = (info->imm.value - 90) / 180;
+  assert (rot < 2U);
   insert_field (self->fields[0], code, rot, inst->opcode->mask);
+  return NULL;
+}
+
+/* Insert 2-bit rotation immediate (#0, #90, #180 or #270).  */
+const char *
+aarch64_ins_imm_rotate2 (const aarch64_operand *self,
+                         const aarch64_opnd_info *info,
+                         aarch64_insn *code, const aarch64_inst *inst)
+{
+  uint64_t rot = info->imm.value / 90;
+  assert (rot < 4U);
+  insert_field (self->fields[0], code, rot, inst->opcode->mask);
   return NULL;
 }
 
@@ -883,6 +872,20 @@ aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
   return NULL;
 }
 
+/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
+   is a 4-bit signed number and where <shift> is SELF's operand-dependent
+   value.  fields[0] specifies the base register field.  */
+const char *
+aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
+                            const aarch64_opnd_info *info, aarch64_insn *code,
+                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+  int factor = 1 << get_operand_specific_data (self);
+  insert_field (self->fields[0], code, info->addr.base_regno, 0);
+  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
+  return NULL;
+}
+
 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
    is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
    value.  fields[0] specifies the base register field.  */
@@ -1040,6 +1043,21 @@ aarch64_ins_sve_limm_mov (const aarch64_operand *self,
   return aarch64_ins_limm (self, info, code, inst);
 }
 
+/* Encode Zn[MM], where Zn occupies the least-significant part of the field
+   and where MM occupies the most-significant part.  The operand-dependent
+   value specifies the number of bits in Zn.  */
+const char *
+aarch64_ins_sve_quad_index (const aarch64_operand *self,
+                            const aarch64_opnd_info *info, aarch64_insn *code,
+                            const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+  unsigned int reg_bits = get_operand_specific_data (self);
+  assert (info->reglane.regno < (1U << reg_bits));
+  unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
+  insert_all_fields (self, code, val);
+  return NULL;
+}
+
 /* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
    to use for Zn.  */
 const char *
@@ -1265,8 +1283,8 @@ do_misc_encoding (aarch64_inst *inst)
       break;
     case OP_MOV_Z_V:
       /* Fill in the zero immediate.  */
-      insert_field (FLD_SVE_tsz, &inst->value,
-                    1 << aarch64_get_variant (inst), 0);
+      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
+                     2, FLD_imm5, FLD_SVE_tszh);
       break;
     case OP_MOV_Z_Z:
       /* Copy Zn to Zm.  */
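
To make the two new encoders in the diff easier to follow, here is a
minimal, self-contained sketch of the packing arithmetic they perform.
The function names (pack_quad_index, scale_s4_offset) and the alignment
assert are invented for illustration; the real inserters additionally
write the computed values into the instruction fields and rely on earlier
operand checks for validity.

#include <assert.h>

/* Zn[MM] packing as done by aarch64_ins_sve_quad_index: the register
   number sits in the low REG_BITS bits and the lane index in the bits
   above it.  */
static unsigned int
pack_quad_index (unsigned int regno, unsigned int index,
                 unsigned int reg_bits)
{
  assert (regno < (1U << reg_bits));
  return (index << reg_bits) + regno;
}

/* Offset scaling as done by aarch64_ins_sve_addr_ri_s4: the encoded
   4-bit immediate is the byte offset divided by 1 << shift, where shift
   is the operand-specific data.  The assert merely documents the
   assumption that the offset is a multiple of that factor.  */
static int
scale_s4_offset (int offset, unsigned int shift)
{
  int factor = 1 << shift;
  assert (offset % factor == 0);
  return offset / factor;
}

int
main (void)
{
  /* With a 3-bit register field, Z5[2] packs as (2 << 3) + 5 = 21.  */
  assert (pack_quad_index (5, 2, 3) == 21);
  /* A byte offset of 48 with shift 4 encodes as 3.  */
  assert (scale_s4_offset (48, 4) == 3);
  return 0;
}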