aboutsummaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorMatthew Malcomson <matthew.malcomson@arm.com>2019-11-07 17:10:01 +0000
committerMatthew Malcomson <matthew.malcomson@arm.com>2019-11-07 17:11:52 +0000
commit8382113fdb028386a335e8dee9ac04ebc8cf04a1 (patch)
treec2e565fa25ec35788e5b2dbc3212629a3218ee64 /include
parenteb5bbc482128b08d2ee8a2470951a74d8351146f (diff)
downloadfsf-binutils-gdb-8382113fdb028386a335e8dee9ac04ebc8cf04a1.zip
fsf-binutils-gdb-8382113fdb028386a335e8dee9ac04ebc8cf04a1.tar.gz
fsf-binutils-gdb-8382113fdb028386a335e8dee9ac04ebc8cf04a1.tar.bz2
[binutils][aarch64] Matrix Multiply extension enablement [8/X]
Hi, This patch is part of a series that adds support for Armv8.6-A (Matrix Multiply and BFloat16 extensions) to binutils. This patch introduces the Matrix Multiply (Int8, F32, F64) extensions to the aarch64 backend. The following instructions are added: {s/u}mmla, usmmla, {us/su}dot, fmmla, ld1rob, ld1roh, ld1row, ld1rod, uzip{1/2}, trn{1/2}. Committed on behalf of Mihail Ionescu. gas/ChangeLog: 2019-11-07 Mihail Ionescu <mihail.ionescu@arm.com> * config/tc-aarch64.c: Add new arch features to support the mm extension. (parse_operands): Add new operand. * testsuite/gas/aarch64/i8mm.s: New test. * testsuite/gas/aarch64/i8mm.d: New test. * testsuite/gas/aarch64/f32mm.s: New test. * testsuite/gas/aarch64/f32mm.d: New test. * testsuite/gas/aarch64/f64mm.s: New test. * testsuite/gas/aarch64/f64mm.d: New test. * testsuite/gas/aarch64/sve-movprfx-mm.s: New test. * testsuite/gas/aarch64/sve-movprfx-mm.d: New test. include/ChangeLog: 2019-11-07 Mihail Ionescu <mihail.ionescu@arm.com> * opcode/aarch64.h (AARCH64_FEATURE_I8MM): New. (AARCH64_FEATURE_F32MM): New. (AARCH64_FEATURE_F64MM): New. (AARCH64_OPND_SVE_ADDR_RI_S4x32): New. (enum aarch64_insn_class): Add new instruction class "aarch64_misc" for instructions that do not require special handling. opcodes/ChangeLog: 2019-11-07 Mihail Ionescu <mihail.ionescu@arm.com> * aarch64-tbl.h (aarch64_feature_i8mm_sve, aarch64_feature_f32mm_sve, aarch64_feature_f64mm_sve, aarch64_feature_i8mm, aarch64_feature_f32mm, aarch64_feature_f64mm): New feature sets. (INT8MATMUL_INSN, F64MATMUL_SVE_INSN, F64MATMUL_INSN, F32MATMUL_SVE_INSN, F32MATMUL_INSN): New macros to define matrix multiply instructions. (I8MM_SVE, F32MM_SVE, F64MM_SVE, I8MM, F32MM, F64MM): New feature set macros. (QL_MMLA64, OP_SVE_SBB): New qualifiers. (OP_SVE_QQQ): New qualifier. (INT8MATMUL_SVE_INSNC, F64MATMUL_SVE_INSNC, F32MATMUL_SVE_INSNC): New feature set for bfloat16 instructions to support the movprfx constraint. (aarch64_opcode_table): Support for SVE_ADDR_RI_S4x32. 
(aarch64_opcode_table): Define new instructions smmla, ummla, usmmla, usdot, sudot, fmmla, ld1rob, ld1roh, ld1row, ld1rod uzip{1/2}, trn{1/2}. * aarch64-opc.c (operand_general_constraint_met_p): Handle AARCH64_OPND_SVE_ADDR_RI_S4x32. (aarch64_print_operand): Handle AARCH64_OPND_SVE_ADDR_RI_S4x32. * aarch64-dis-2.c (aarch64_opcode_lookup_1, aarch64_find_next_opcode): Account for new instructions. * opcodes/aarch64-asm-2.c (aarch64_insert_operand): Support the new S4x32 operand. * aarch64-opc-2.c (aarch64_operands): Support the new S4x32 operand. Regression tested on arm-none-eabi. Is it ok for trunk? Regards, Mihail
Diffstat (limited to 'include')
-rw-r--r--include/ChangeLog9
-rw-r--r--include/opcode/aarch64.h10
2 files changed, 18 insertions, 1 deletion
diff --git a/include/ChangeLog b/include/ChangeLog
index 930d931..2543e09 100644
--- a/include/ChangeLog
+++ b/include/ChangeLog
@@ -1,4 +1,13 @@
2019-11-07 Mihail Ionescu <mihail.ionescu@arm.com>
+
+ * opcode/aarch64.h (AARCH64_FEATURE_I8MM): New.
+ (AARCH64_FEATURE_F32MM): New.
+ (AARCH64_FEATURE_F64MM): New.
+ (AARCH64_OPND_SVE_ADDR_RI_S4x32): New.
+ (enum aarch64_insn_class): Add new instruction class "aarch64_misc" for
+ instructions that do not require special handling.
+
+2019-11-07 Mihail Ionescu <mihail.ionescu@arm.com>
2019-11-07 Matthew Malcomson <matthew.malcomson@arm.com>
* opcode/arm.h (ARM_EXT2_V8_6A, ARM_AEXT2_V8_6A,
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 4cda1e0..d9f3d09 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -91,6 +91,11 @@ typedef uint32_t aarch64_insn;
/* Transactional Memory Extension. */
#define AARCH64_FEATURE_TME 0x2000000000000ULL
+/* Matrix Multiply instructions */
+#define AARCH64_FEATURE_I8MM 0x10000000000000ULL
+#define AARCH64_FEATURE_F32MM 0x20000000000000ULL
+#define AARCH64_FEATURE_F64MM 0x40000000000000ULL
+
/* SVE2 instructions. */
#define AARCH64_FEATURE_SVE2 0x000000010
#define AARCH64_FEATURE_SVE2_AES 0x000000080
@@ -133,7 +138,8 @@ typedef uint32_t aarch64_insn;
| AARCH64_FEATURE_SSBS)
#define AARCH64_ARCH_V8_6 AARCH64_FEATURE (AARCH64_ARCH_V8_5, \
AARCH64_FEATURE_V8_6 \
- | AARCH64_FEATURE_BFLOAT16)
+ | AARCH64_FEATURE_BFLOAT16 \
+ | AARCH64_FEATURE_I8MM)
#define AARCH64_ARCH_NONE AARCH64_FEATURE (0, 0)
#define AARCH64_ANY AARCH64_FEATURE (-1, 0) /* Any basic core. */
@@ -322,6 +328,7 @@ enum aarch64_opnd
AARCH64_OPND_BTI_TARGET, /* BTI {<target>}. */
AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16]. */
+ AARCH64_OPND_SVE_ADDR_RI_S4x32, /* SVE [<Xn|SP>, #<simm4>*32]. */
AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL]. */
AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL]. */
AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL]. */
@@ -520,6 +527,7 @@ enum aarch64_opnd_qualifier
enum aarch64_insn_class
{
+ aarch64_misc,
addsub_carry,
addsub_ext,
addsub_imm,