author     Matthew Wahab <matthew.wahab@arm.com>  2015-12-14 17:46:21 +0000
committer  Matthew Wahab <matthew.wahab@arm.com>  2015-12-14 17:46:21 +0000
commit     4fd0a9fd005ea3affe8e61f6ec82817055a3bc2b (patch)
tree       5bf37bc494996c35fe21429720bdce25e01cc00d /opcodes/aarch64-tbl.h
parent     b5b0f34c669a91b9d873221ea3d688cf7f495ab5 (diff)
[AArch64][PATCH 14/14] Support FP16 Adv.SIMD Scalar Shift By Immediate instructions.
ARMv8.2 adds 16-bit floating point operations as an optional extension to
the floating point and Adv.SIMD support. This patch extends the
instructions in the Adv.SIMD Scalar Shift By Immediate group to support
FP16, making that support available when +simd+fp16 is enabled. The
extended instructions are SCVTF, FCVTZS, UCVTF and FCVTZU. The general
form of these instructions is

	<OP> <Hd>, <Hs>, #<imm>

gas/testsuite/
2015-12-14  Matthew Wahab  <matthew.wahab@arm.com>

	* gas/aarch64/advsimd-fp16.d: Update expected output.
	* gas/aarch64/advsimd-fp16: Add tests for Adv.SIMD scalar shift
	by immediate instructions.

opcodes/
2015-12-14  Matthew Wahab  <matthew.wahab@arm.com>

	* aarch64-asm-2.c: Regenerate.
	* aarch64-dis-2.c: Regenerate.
	* aarch64-opc-2.c: Regenerate.
	* aarch64-tbl.h (QL_SSHIFT_H): New.
	(aarch64_opcode_table): Add fp16 versions of scvtf, fcvtzs, ucvtf
	and fcvtzu to the Adv.SIMD scalar shift by immediate group.

Change-Id: I40506496f52dd96909e7344f243b38a1870df7ff
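As an illustration only (not part of the commit), the new FP16 forms would
be written in GAS syntax roughly as below. The register numbers and fbits
immediates are arbitrary, and the .arch directive is just one way of
enabling the required +simd+fp16 extensions:

	.arch armv8.2-a+simd+fp16

	scvtf	h0, h1, #4	// signed fixed-point -> half-precision FP
	fcvtzs	h2, h3, #8	// half-precision FP -> signed fixed-point
	ucvtf	h4, h5, #4	// unsigned fixed-point -> half-precision FP
	fcvtzu	h6, h7, #8	// half-precision FP -> unsigned fixed-point

Each line matches the general form <OP> <Hd>, <Hs>, #<imm> described above.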
Diffstat (limited to 'opcodes/aarch64-tbl.h')
-rw-r--r--  opcodes/aarch64-tbl.h | 14
1 file changed, 14 insertions, 0 deletions
diff --git a/opcodes/aarch64-tbl.h b/opcodes/aarch64-tbl.h
index 9845caf..11c97af 100644
--- a/opcodes/aarch64-tbl.h
+++ b/opcodes/aarch64-tbl.h
@@ -375,6 +375,12 @@
QLF3(S_D , S_D , S_D ) \
}
+/* e.g. UCVTF <Vd>.<T>, <Vn>.<T>, #<fbits>. */
+#define QL_SSHIFT_H \
+{ \
+ QLF3 (S_H, S_H, S_H) \
+}
+
/* e.g. SQSHRUN <Vb><d>, <Va><n>, #<shift>. */
#define QL_SSHIFTN \
{ \
@@ -2106,7 +2112,11 @@ struct aarch64_opcode aarch64_opcode_table[] =
{"sqshrn", 0x5f009400, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFTN, 0},
{"sqrshrn", 0x5f009c00, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFTN, 0},
{"scvtf", 0x5f00e400, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_SD, 0},
+ {"scvtf", 0x5f10e400, 0xff80fc00, asisdshf, 0, SIMD_F16,
+ OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_H, 0},
{"fcvtzs", 0x5f00fc00, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_SD, 0},
+ {"fcvtzs", 0x5f10fc00, 0xff80fc00, asisdshf, 0, SIMD_F16,
+ OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_H, 0},
{"ushr", 0x7f000400, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_D, 0},
{"usra", 0x7f001400, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_D, 0},
{"urshr", 0x7f002400, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_D, 0},
@@ -2120,7 +2130,11 @@ struct aarch64_opcode aarch64_opcode_table[] =
{"uqshrn", 0x7f009400, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFTN, 0},
{"uqrshrn", 0x7f009c00, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFTN, 0},
{"ucvtf", 0x7f00e400, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_SD, 0},
+ {"ucvtf", 0x7f10e400, 0xff80fc00, asisdshf, 0, SIMD_F16,
+ OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_H, 0},
{"fcvtzu", 0x7f00fc00, 0xff80fc00, asisdshf, 0, SIMD, OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_SD, 0},
+ {"fcvtzu", 0x7f10fc00, 0xff80fc00, asisdshf, 0, SIMD_F16,
+ OP3 (Sd, Sn, IMM_VLSR), QL_SSHIFT_H, 0},
/* Bitfield. */
{"sbfm", 0x13000000, 0x7f800000, bitfield, 0, CORE, OP4 (Rd, Rn, IMMR, IMMS), QL_BF, F_HAS_ALIAS | F_SF | F_N},
{"sbfiz", 0x13000000, 0x7f800000, bitfield, OP_SBFIZ, CORE, OP4 (Rd, Rn, IMM, WIDTH), QL_BF2, F_ALIAS | F_P1 | F_CONV},