author     Richard Sandiford <richard.sandiford@arm.com>  2016-09-21 16:56:57 +0100
committer  Richard Sandiford <richard.sandiford@arm.com>  2016-09-21 16:56:57 +0100
commit     e950b3453948830c5ce9c2f70d114d0b38a4b4ac (patch)
tree       86d0ac10f2bf7783666d4059419b606d3fbb5a38 /opcodes/aarch64-asm.c
parent     98907a704908c5877d929c57b2ddb2e5f899d9a9 (diff)
[AArch64][SVE 27/32] Add SVE integer immediate operands
This patch adds the new SVE integer immediate operands.  There are
three kinds:

- simple signed and unsigned ranges, but with new widths and positions.

- 13-bit logical immediates.  These have the same form as in base AArch64,
  but at a different bit position.

  In the case of the "MOV Zn.<T>, #<limm>" alias of DUPM, the logical
  immediate <limm> is not allowed to be a valid DUP immediate, since DUP
  is preferred over DUPM for constants that both instructions can handle.

- a new 9-bit arithmetic immediate, of the form "<imm8>{, LSL #8}".
  In some contexts the operand is signed and in others it's unsigned.
  As an extension, we allow shifted immediates to be written as a single
  integer, e.g. "#256" is equivalent to "#1, LSL #8".  We also use the
  shiftless form as the preferred disassembly, except for the special
  case of "#0, LSL #8" (a redundant encoding of 0).  (A stand-alone
  sketch of this shift folding follows the ChangeLog below.)

include/
	* opcode/aarch64.h (AARCH64_OPND_SIMM5): New aarch64_opnd.
	(AARCH64_OPND_SVE_AIMM, AARCH64_OPND_SVE_ASIMM)
	(AARCH64_OPND_SVE_INV_LIMM, AARCH64_OPND_SVE_LIMM)
	(AARCH64_OPND_SVE_LIMM_MOV, AARCH64_OPND_SVE_SHLIMM_PRED)
	(AARCH64_OPND_SVE_SHLIMM_UNPRED, AARCH64_OPND_SVE_SHRIMM_PRED)
	(AARCH64_OPND_SVE_SHRIMM_UNPRED, AARCH64_OPND_SVE_SIMM5)
	(AARCH64_OPND_SVE_SIMM5B, AARCH64_OPND_SVE_SIMM6)
	(AARCH64_OPND_SVE_SIMM8, AARCH64_OPND_SVE_UIMM3)
	(AARCH64_OPND_SVE_UIMM7, AARCH64_OPND_SVE_UIMM8)
	(AARCH64_OPND_SVE_UIMM8_53): Likewise.
	(aarch64_sve_dupm_mov_immediate_p): Declare.

opcodes/
	* aarch64-tbl.h (AARCH64_OPERANDS): Add entries for the new SVE
	integer immediate operands.
	* aarch64-opc.h (FLD_SVE_immN, FLD_SVE_imm3, FLD_SVE_imm5)
	(FLD_SVE_imm5b, FLD_SVE_imm7, FLD_SVE_imm8, FLD_SVE_imm9)
	(FLD_SVE_immr, FLD_SVE_imms, FLD_SVE_tszh): New aarch64_field_kinds.
	* aarch64-opc.c (fields): Add corresponding entries.
	(operand_general_constraint_met_p): Handle the new SVE integer
	immediate operands.
	(aarch64_print_operand): Likewise.
	(aarch64_sve_dupm_mov_immediate_p): New function.
	* aarch64-opc-2.c: Regenerate.
	* aarch64-asm.h (ins_inv_limm, ins_sve_aimm, ins_sve_asimm)
	(ins_sve_limm_mov, ins_sve_shlimm, ins_sve_shrimm): New inserters.
	* aarch64-asm.c (aarch64_ins_limm_1): New function, split out from...
	(aarch64_ins_limm): ...here.
	(aarch64_ins_inv_limm): New function.
	(aarch64_ins_sve_aimm): Likewise.
	(aarch64_ins_sve_asimm): Likewise.
	(aarch64_ins_sve_limm_mov): Likewise.
	(aarch64_ins_sve_shlimm): Likewise.
	(aarch64_ins_sve_shrimm): Likewise.
	* aarch64-asm-2.c: Regenerate.
	* aarch64-dis.h (ext_inv_limm, ext_sve_aimm, ext_sve_asimm)
	(ext_sve_limm_mov, ext_sve_shlimm, ext_sve_shrimm): New extractors.
	* aarch64-dis.c (decode_limm): New function, split out from...
	(aarch64_ext_limm): ...here.
	(aarch64_ext_inv_limm): New function.
	(decode_sve_aimm): Likewise.
	(aarch64_ext_sve_aimm): Likewise.
	(aarch64_ext_sve_asimm): Likewise.
	(aarch64_ext_sve_limm_mov): Likewise.
	(aarch64_top_bit): Likewise.
	(aarch64_ext_sve_shlimm): Likewise.
	(aarch64_ext_sve_shrimm): Likewise.
	* aarch64-dis-2.c: Regenerate.

gas/
	* config/tc-aarch64.c (parse_operands): Handle the new SVE integer
	immediate operands.
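For reference, the shift folding described above for the 9-bit arithmetic
immediate can be tried in isolation with the small C sketch below.  It is
only an illustration: the helper name sve_aimm_encode and the printf driver
are invented for this example, and the real encoder is aarch64_ins_sve_aimm
in the diff that follows.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: fold an SVE arithmetic immediate into the 9-bit
   "sh:imm8" field, mirroring the logic of aarch64_ins_sve_aimm below.
   VALUE is the immediate as written and SHIFT is the explicit LSL
   amount (0 or 8).  Range checking is assumed to have been done.  */
static unsigned int
sve_aimm_encode (int64_t value, unsigned int shift)
{
  if (shift == 8)
    /* Explicit "#<imm8>, LSL #8": set the shift bit (bit 8).  */
    return (value & 0xff) | 256;
  if (value != 0 && (value & 0xff) == 0)
    /* Shiftless multiple of 256, e.g. "#256" == "#1, LSL #8".  */
    return ((value / 256) & 0xff) | 256;
  /* Plain unshifted byte value.  */
  return value & 0xff;
}

int
main (void)
{
  /* "#1, LSL #8" and "#256" get the same encoding; "#0, LSL #8" keeps
     the shift bit, which is why it is also kept in disassembly.  */
  printf ("%u %u %u %u\n",
	  sve_aimm_encode (1, 8),	/* 257 */
	  sve_aimm_encode (256, 0),	/* 257 */
	  sve_aimm_encode (0, 8),	/* 256 */
	  sve_aimm_encode (5, 0));	/* 5 */
  return 0;
}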
Diffstat (limited to 'opcodes/aarch64-asm.c')
-rw-r--r--  opcodes/aarch64-asm.c  97
1 file changed, 91 insertions, 6 deletions
diff --git a/opcodes/aarch64-asm.c b/opcodes/aarch64-asm.c
index 944a9eb..61d0d95 100644
--- a/opcodes/aarch64-asm.c
+++ b/opcodes/aarch64-asm.c
@@ -452,17 +452,18 @@ aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
return NULL;
}
-/* Insert logical/bitmask immediate for e.g. the last operand in
- ORR <Wd|WSP>, <Wn>, #<imm>. */
-const char *
-aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
- aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED)
+/* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
+ the operand should be inverted before encoding. */
+static const char *
+aarch64_ins_limm_1 (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst, bfd_boolean invert_p)
{
aarch64_insn value;
uint64_t imm = info->imm.value;
int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
- if (inst->opcode->op == OP_BIC)
+ if (invert_p)
imm = ~imm;
if (aarch64_logical_immediate_p (imm, esize, &value) == FALSE)
/* The constraint check should have guaranteed this wouldn't happen. */
@@ -473,6 +474,25 @@ aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
return NULL;
}
+/* Insert logical/bitmask immediate for e.g. the last operand in
+ ORR <Wd|WSP>, <Wn>, #<imm>. */
+const char *
+aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
+ aarch64_insn *code, const aarch64_inst *inst)
+{
+ return aarch64_ins_limm_1 (self, info, code, inst,
+ inst->opcode->op == OP_BIC);
+}
+
+/* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
+const char *
+aarch64_ins_inv_limm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ return aarch64_ins_limm_1 (self, info, code, inst, TRUE);
+}
+
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
const char *
@@ -903,6 +923,30 @@ aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
return aarch64_ext_sve_addr_zz (self, info, code);
}
+/* Encode an SVE ADD/SUB immediate. */
+const char *
+aarch64_ins_sve_aimm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ if (info->shifter.amount == 8)
+ insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
+ else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
+ insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
+ else
+ insert_all_fields (self, code, info->imm.value & 0xff);
+ return NULL;
+}
+
+/* Encode an SVE CPY/DUP immediate. */
+const char *
+aarch64_ins_sve_asimm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ return aarch64_ins_sve_aimm (self, info, code, inst);
+}
+
/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
array specifies which field to use for Zn. MM is encoded in the
concatenation of imm5 and SVE_tszh, with imm5 being the less
@@ -919,6 +963,15 @@ aarch64_ins_sve_index (const aarch64_operand *self,
return NULL;
}
+/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
+const char *
+aarch64_ins_sve_limm_mov (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ return aarch64_ins_limm (self, info, code, inst);
+}
+
/* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
to use for Zn. */
const char *
@@ -943,6 +996,38 @@ aarch64_ins_sve_scale (const aarch64_operand *self,
return NULL;
}
+/* Encode an SVE shift left immediate. */
+const char *
+aarch64_ins_sve_shlimm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ const aarch64_opnd_info *prev_operand;
+ unsigned int esize;
+
+ assert (info->idx > 0);
+ prev_operand = &inst->operands[info->idx - 1];
+ esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
+ insert_all_fields (self, code, 8 * esize + info->imm.value);
+ return NULL;
+}
+
+/* Encode an SVE shift right immediate. */
+const char *
+aarch64_ins_sve_shrimm (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst)
+{
+ const aarch64_opnd_info *prev_operand;
+ unsigned int esize;
+
+ assert (info->idx > 0);
+ prev_operand = &inst->operands[info->idx - 1];
+ esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
+ insert_all_fields (self, code, 16 * esize - info->imm.value);
+ return NULL;
+}
+
/* Miscellaneous encoding functions. */
/* Encode size[0], i.e. bit 22, for