diff options
author | Matthew Malcomson <matthew.malcomson@arm.com> | 2019-11-07 17:10:01 +0000 |
---|---|---|
committer | Matthew Malcomson <matthew.malcomson@arm.com> | 2019-11-07 17:11:52 +0000 |
commit | 8382113fdb028386a335e8dee9ac04ebc8cf04a1 (patch) | |
tree | c2e565fa25ec35788e5b2dbc3212629a3218ee64 /opcodes/aarch64-asm-2.c | |
parent | eb5bbc482128b08d2ee8a2470951a74d8351146f (diff) | |
download | gdb-8382113fdb028386a335e8dee9ac04ebc8cf04a1.zip gdb-8382113fdb028386a335e8dee9ac04ebc8cf04a1.tar.gz gdb-8382113fdb028386a335e8dee9ac04ebc8cf04a1.tar.bz2 |
[binutils][aarch64] Matrix Multiply extension enablement [8/X]
Hi,
This patch is part of a series that adds support for Armv8.6-A
(Matrix Multiply and BFloat16 extensions) to binutils.
This patch introduces the Matrix Multiply (Int8, F32, F64) extensions
to the aarch64 backend.
The following instructions are added: {s/u}mmla, usmmla, {us/su}dot,
fmmla, ld1rob, ld1roh, ld1row, ld1rod, uzip{1/2}, trn{1/2}.
Committed on behalf of Mihail Ionescu.
gas/ChangeLog:
2019-11-07 Mihail Ionescu <mihail.ionescu@arm.com>
* config/tc-aarch64.c: Add new arch features to support the mm extension.
(parse_operands): Add new operand.
* testsuite/gas/aarch64/i8mm.s: New test.
* testsuite/gas/aarch64/i8mm.d: New test.
* testsuite/gas/aarch64/f32mm.s: New test.
* testsuite/gas/aarch64/f32mm.d: New test.
* testsuite/gas/aarch64/f64mm.s: New test.
* testsuite/gas/aarch64/f64mm.d: New test.
* testsuite/gas/aarch64/sve-movprfx-mm.s: New test.
* testsuite/gas/aarch64/sve-movprfx-mm.d: New test.
include/ChangeLog:
2019-11-07 Mihail Ionescu <mihail.ionescu@arm.com>
* opcode/aarch64.h (AARCH64_FEATURE_I8MM): New.
(AARCH64_FEATURE_F32MM): New.
(AARCH64_FEATURE_F64MM): New.
(AARCH64_OPND_SVE_ADDR_RI_S4x32): New.
(enum aarch64_insn_class): Add new instruction class "aarch64_misc" for
instructions that do not require special handling.
opcodes/ChangeLog:
2019-11-07 Mihail Ionescu <mihail.ionescu@arm.com>
* aarch64-tbl.h (aarch64_feature_i8mm_sve, aarch64_feature_f32mm_sve,
aarch64_feature_f64mm_sve, aarch64_feature_i8mm, aarch64_feature_f32mm,
aarch64_feature_f64mm): New feature sets.
(INT8MATMUL_INSN, F64MATMUL_SVE_INSN, F64MATMUL_INSN,
F32MATMUL_SVE_INSN, F32MATMUL_INSN): New macros to define matrix multiply
instructions.
(I8MM_SVE, F32MM_SVE, F64MM_SVE, I8MM, F32MM, F64MM): New feature set
macros.
(QL_MMLA64, OP_SVE_SBB): New qualifiers.
(OP_SVE_QQQ): New qualifier.
(INT8MATMUL_SVE_INSNC, F64MATMUL_SVE_INSNC,
F32MATMUL_SVE_INSNC): New macros to define matrix multiply instructions
that support the movprfx constraint.
(aarch64_opcode_table): Support for SVE_ADDR_RI_S4x32.
(aarch64_opcode_table): Define new instructions smmla,
ummla, usmmla, usdot, sudot, fmmla, ld1rob, ld1roh, ld1row, ld1rod
uzip{1/2}, trn{1/2}.
* aarch64-opc.c (operand_general_constraint_met_p): Handle
AARCH64_OPND_SVE_ADDR_RI_S4x32.
(aarch64_print_operand): Handle AARCH64_OPND_SVE_ADDR_RI_S4x32.
* aarch64-dis-2.c (aarch64_opcode_lookup_1, aarch64_find_next_opcode):
Account for new instructions.
* opcodes/aarch64-asm-2.c (aarch64_insert_operand): Support the new
S4x32 operand.
* aarch64-opc-2.c (aarch64_operands): Support the new S4x32 operand.
Regression tested on arm-none-eabi.
Is it ok for trunk?
Regards,
Mihail
Diffstat (limited to 'opcodes/aarch64-asm-2.c')
-rw-r--r-- | opcodes/aarch64-asm-2.c | 77 |
1 files changed, 39 insertions, 38 deletions
diff --git a/opcodes/aarch64-asm-2.c b/opcodes/aarch64-asm-2.c index 20472ae..3fa081a 100644 --- a/opcodes/aarch64-asm-2.c +++ b/opcodes/aarch64-asm-2.c @@ -628,7 +628,6 @@ aarch64_insert_operand (const aarch64_operand *self, case 28: case 29: case 30: - case 162: case 163: case 164: case 165: @@ -638,7 +637,7 @@ aarch64_insert_operand (const aarch64_operand *self, case 169: case 170: case 171: - case 186: + case 172: case 187: case 188: case 189: @@ -647,8 +646,9 @@ aarch64_insert_operand (const aarch64_operand *self, case 192: case 193: case 194: - case 200: - case 203: + case 195: + case 201: + case 204: return aarch64_ins_regno (self, info, code, inst, errors); case 14: return aarch64_ins_reg_extended (self, info, code, inst, errors); @@ -660,7 +660,7 @@ aarch64_insert_operand (const aarch64_operand *self, case 32: case 33: case 34: - case 206: + case 207: return aarch64_ins_reglane (self, info, code, inst, errors); case 35: return aarch64_ins_reglist (self, info, code, inst, errors); @@ -694,9 +694,8 @@ aarch64_insert_operand (const aarch64_operand *self, case 80: case 81: case 82: - case 159: - case 161: - case 178: + case 160: + case 162: case 179: case 180: case 181: @@ -704,7 +703,8 @@ aarch64_insert_operand (const aarch64_operand *self, case 183: case 184: case 185: - case 205: + case 186: + case 206: return aarch64_ins_imm (self, info, code, inst, errors); case 43: case 44: @@ -714,10 +714,10 @@ aarch64_insert_operand (const aarch64_operand *self, case 47: return aarch64_ins_advsimd_imm_modified (self, info, code, inst, errors); case 51: - case 149: + case 150: return aarch64_ins_fpimm (self, info, code, inst, errors); case 68: - case 157: + case 158: return aarch64_ins_limm (self, info, code, inst, errors); case 69: return aarch64_ins_aimm (self, info, code, inst, errors); @@ -727,11 +727,11 @@ aarch64_insert_operand (const aarch64_operand *self, return aarch64_ins_fbits (self, info, code, inst, errors); case 73: case 74: - case 154: + case 155: return 
aarch64_ins_imm_rotate2 (self, info, code, inst, errors); case 75: - case 153: - case 155: + case 154: + case 156: return aarch64_ins_imm_rotate1 (self, info, code, inst, errors); case 76: case 77: @@ -774,22 +774,22 @@ aarch64_insert_operand (const aarch64_operand *self, case 106: return aarch64_ins_hint (self, info, code, inst, errors); case 107: - return aarch64_ins_sve_addr_ri_s4 (self, info, code, inst, errors); case 108: + return aarch64_ins_sve_addr_ri_s4 (self, info, code, inst, errors); case 109: case 110: case 111: - return aarch64_ins_sve_addr_ri_s4xvl (self, info, code, inst, errors); case 112: - return aarch64_ins_sve_addr_ri_s6xvl (self, info, code, inst, errors); + return aarch64_ins_sve_addr_ri_s4xvl (self, info, code, inst, errors); case 113: - return aarch64_ins_sve_addr_ri_s9xvl (self, info, code, inst, errors); + return aarch64_ins_sve_addr_ri_s6xvl (self, info, code, inst, errors); case 114: + return aarch64_ins_sve_addr_ri_s9xvl (self, info, code, inst, errors); case 115: case 116: case 117: - return aarch64_ins_sve_addr_ri_u6 (self, info, code, inst, errors); case 118: + return aarch64_ins_sve_addr_ri_u6 (self, info, code, inst, errors); case 119: case 120: case 121: @@ -803,8 +803,8 @@ aarch64_insert_operand (const aarch64_operand *self, case 129: case 130: case 131: - return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst, errors); case 132: + return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst, errors); case 133: case 134: case 135: @@ -812,52 +812,53 @@ aarch64_insert_operand (const aarch64_operand *self, case 137: case 138: case 139: - return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst, errors); case 140: + return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst, errors); case 141: case 142: case 143: - return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst, errors); case 144: - return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst, errors); + return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst, 
errors); case 145: - return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst, errors); + return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst, errors); case 146: - return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst, errors); + return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst, errors); case 147: - return aarch64_ins_sve_aimm (self, info, code, inst, errors); + return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst, errors); case 148: + return aarch64_ins_sve_aimm (self, info, code, inst, errors); + case 149: return aarch64_ins_sve_asimm (self, info, code, inst, errors); - case 150: - return aarch64_ins_sve_float_half_one (self, info, code, inst, errors); case 151: - return aarch64_ins_sve_float_half_two (self, info, code, inst, errors); + return aarch64_ins_sve_float_half_one (self, info, code, inst, errors); case 152: + return aarch64_ins_sve_float_half_two (self, info, code, inst, errors); + case 153: return aarch64_ins_sve_float_zero_one (self, info, code, inst, errors); - case 156: + case 157: return aarch64_ins_inv_limm (self, info, code, inst, errors); - case 158: + case 159: return aarch64_ins_sve_limm_mov (self, info, code, inst, errors); - case 160: + case 161: return aarch64_ins_sve_scale (self, info, code, inst, errors); - case 172: case 173: case 174: - return aarch64_ins_sve_shlimm (self, info, code, inst, errors); case 175: + return aarch64_ins_sve_shlimm (self, info, code, inst, errors); case 176: case 177: + case 178: return aarch64_ins_sve_shrimm (self, info, code, inst, errors); - case 195: case 196: case 197: case 198: case 199: + case 200: return aarch64_ins_sve_quad_index (self, info, code, inst, errors); - case 201: - return aarch64_ins_sve_index (self, info, code, inst, errors); case 202: - case 204: + return aarch64_ins_sve_index (self, info, code, inst, errors); + case 203: + case 205: return aarch64_ins_sve_reglist (self, info, code, inst, errors); default: assert (0); abort (); } |