author     Tamar Christina <tamar.christina@arm.com>    2018-10-03 18:38:42 +0100
committer  Tamar Christina <tamar.christina@arm.com>    2018-10-03 18:49:37 +0100
commit     a68f4cd235a36776d3d9fea7291163b8d8e35869 (patch)
tree       0bb4db63cd8160de4e3f5d4868aa3d5613f73b6f
parent     755b748fd9fbee8cad2e55535d23298e8ac76b15 (diff)
AArch64: Add SVE constraints verifier.
This patch adds the verification rules for move prefix constraints.

The Arm SVE instruction MOVPRFX [1] introduces constraints on the instruction
at PC+4.  In particular, the following constraints are handled by this patch:

* MOVPRFX must be followed by an instruction.
* MOVPRFX can only be followed by non-layout-altering directives.
* The MOVPRFX destination register must be used as the destination register of
  the instruction at PC+4, and is not allowed to be used in any position other
  than as a destructive input.  This includes registers that architecturally
  overlap, e.g. x1 should be treated as z1.
* MOVPRFX must be followed by a restricted set of SVE instructions.
* The size of the destination register of MOVPRFX must be equal to that of the
  operation at PC+4.
* The predicate register and operation of MOVPRFX must match those of the
  instruction at PC+4.
* The predicated instruction at PC+4 must use the merging predicate.
* Architectural aliases and pseudo-instructions need to be supported as well.
* MOVPRFX cannot be the last instruction in a sequence.

Any failure to adhere to any of these constraints will emit an assembly
warning and a disassembly note.

[1] https://developer.arm.com/docs/ddi0584/latest/arm-architecture-reference-manual-supplement-the-scalable-vector-extension-sve-for-armv8-a

include/

	* opcode/aarch64.h (aarch64_inst): Remove.
	(enum err_type): Add ERR_VFI.
	(aarch64_is_destructive_by_operands): New.
	(init_insn_sequence): New.
	(aarch64_decode_insn): Remove param name.

opcodes/

	* aarch64-opc.c (init_insn_block): New.
	(verify_constraints, aarch64_is_destructive_by_operands): New.
	* aarch64-opc.h (verify_constraints): New.

gas/

	* config/tc-aarch64.c (output_operand_error_report): Order warnings.
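As a rough illustration of how these rules are exercised (not part of the patch
itself), the sketch below drives the new verifier over a hypothetical assembled
pair such as "movprfx z0.d, p0/m, z1.d" followed by "add z0.d, p0/m, z0.d, z5.d".
It assumes the caller can see the internal verify_constraints declaration from
opcodes/aarch64-opc.h as well as <string.h> and <stdio.h>; in-tree, gas and the
disassembler reach the verifier through the opcode verifier hook rather than
calling it directly.

/* Minimal sketch, assuming the two decoded instructions are supplied by the
   caller; not the committed code.  */
static void
check_movprfx_pair (const aarch64_inst *prfx, aarch64_insn prfx_word,
                    const aarch64_inst *insn, aarch64_insn insn_word,
                    bfd_vma pc)
{
  aarch64_instr_sequence seq;
  aarch64_operand_error detail;
  memset (&seq, 0, sizeof seq);
  memset (&detail, 0, sizeof detail);

  /* The movprfx opcode carries F_SCAN, so this call only opens a new
     dependency sequence.  */
  verify_constraints (prfx, prfx_word, pc, TRUE, &detail, &seq);

  /* The instruction at PC+4 is checked against the recorded movprfx:
     same destination z0, same predicate p0, merging predication and a
     matching element size.  On a violation the verifier fills DETAIL,
     marks it non-fatal and returns ERR_VFI; the sequence is reset either
     way.  */
  if (verify_constraints (insn, insn_word, pc + 4, TRUE, &detail, &seq)
      == ERR_VFI)
    fprintf (stderr, "note: %s\n", detail.error);
}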
-rw-r--r--   gas/ChangeLog               4
-rw-r--r--   gas/config/tc-aarch64.c     6
-rw-r--r--   include/ChangeLog           8
-rw-r--r--   include/opcode/aarch64.h   10
-rw-r--r--   opcodes/ChangeLog           6
-rw-r--r--   opcodes/aarch64-opc.c     349
-rw-r--r--   opcodes/aarch64-opc.h       4
7 files changed, 382 insertions, 5 deletions
diff --git a/gas/ChangeLog b/gas/ChangeLog
index 68fc927..b4cafbb 100644
--- a/gas/ChangeLog
+++ b/gas/ChangeLog
@@ -1,5 +1,9 @@
2018-10-03 Tamar Christina <tamar.christina@arm.com>
+ * config/tc-aarch64.c (output_operand_error_report): Order warnings.
+
+2018-10-03 Tamar Christina <tamar.christina@arm.com>
+
* config/tc-aarch64.c (now_instr_sequence):
(*insn_sequence, now_instr_sequence): New.
(output_operand_error_record, do_encode): Add insn_sequence.
diff --git a/gas/config/tc-aarch64.c b/gas/config/tc-aarch64.c
index 206019b..a5fe6bc 100644
--- a/gas/config/tc-aarch64.c
+++ b/gas/config/tc-aarch64.c
@@ -4807,10 +4807,12 @@ output_operand_error_report (char *str, bfd_boolean non_fatal_only)
{
gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
- if (operand_error_higher_severity_p (curr->detail.kind, kind))
+ if (operand_error_higher_severity_p (curr->detail.kind, kind)
+ && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
kind = curr->detail.kind;
}
- gas_assert (kind != AARCH64_OPDE_NIL);
+
+ gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
/* Pick up one of errors of KIND to report. */
largest_error_pos = -2; /* Index can be -1 which means unknown index. */
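The effect of the two changes above: during the non-fatal-only reporting pass
only records marked non_fatal contribute to the severity ordering, and the
relaxed assertion tolerates the case where that pass finds nothing to report.
The added guard can be read as the shorter equivalent below (a sketch, not the
committed diff):

/* Consider a record's severity only when it is eligible for this pass.  */
if (operand_error_higher_severity_p (curr->detail.kind, kind)
    && (!non_fatal_only || curr->detail.non_fatal))
  kind = curr->detail.kind;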
diff --git a/include/ChangeLog b/include/ChangeLog
index dc200a1..a909db3 100644
--- a/include/ChangeLog
+++ b/include/ChangeLog
@@ -1,5 +1,13 @@
2018-10-03 Tamar Christina <tamar.christina@arm.com>
+ * opcode/aarch64.h (aarch64_inst): Remove.
+ (enum err_type): Add ERR_VFI.
+ (aarch64_is_destructive_by_operands): New.
+ (init_insn_sequence): New.
+ (aarch64_decode_insn): Remove param name.
+
+2018-10-03 Tamar Christina <tamar.christina@arm.com>
+
* opcode/aarch64.h (struct aarch64_opcode): Expand verifiers to take
more arguments.
diff --git a/include/opcode/aarch64.h b/include/opcode/aarch64.h
index 751d7bb..f66ee86 100644
--- a/include/opcode/aarch64.h
+++ b/include/opcode/aarch64.h
@@ -648,6 +648,7 @@ enum err_type
ERR_UND,
ERR_UNP,
ERR_NYI,
+ ERR_VFI,
ERR_NR_ENTRIES
};
@@ -1065,7 +1066,6 @@ struct aarch64_inst
aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
};
-typedef struct aarch64_inst aarch64_inst;
/* Diagnosis related declaration and interface. */
@@ -1190,6 +1190,9 @@ extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
const aarch64_opnd_qualifier_t, int);
+extern bfd_boolean
+aarch64_is_destructive_by_operands (const aarch64_opcode *);
+
extern int
aarch64_num_of_operands (const aarch64_opcode *);
@@ -1201,7 +1204,10 @@ aarch64_zero_register_p (const aarch64_opnd_info *);
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bfd_boolean,
- aarch64_operand_error *errors);
+ aarch64_operand_error *);
+
+extern void
+init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);
/* Given an operand qualifier, return the expected data element size
of a qualified operand. */
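The aarch64_is_destructive_by_operands helper declared above is implemented in
the opcodes/aarch64-opc.c hunk further down; it keys purely on whether the
first (destination) operand type occurs again later in the opcode's operand
list.  A hypothetical illustration (both opcode entries below are made up, not
taken from the real opcode table):

static const aarch64_opcode destructive_like = {
  .operands = { AARCH64_OPND_SVE_Zd, AARCH64_OPND_SVE_Pg3,
                AARCH64_OPND_SVE_Zd, AARCH64_OPND_SVE_Zm_5 }
};
static const aarch64_opcode constructive_like = {
  .operands = { AARCH64_OPND_SVE_Zd, AARCH64_OPND_SVE_Pg3,
                AARCH64_OPND_SVE_Zn, AARCH64_OPND_SVE_Zm_5 }
};

/* aarch64_is_destructive_by_operands (&destructive_like)   -> TRUE
   aarch64_is_destructive_by_operands (&constructive_like)  -> FALSE  */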
diff --git a/opcodes/ChangeLog b/opcodes/ChangeLog
index aae5459..2600d6c 100644
--- a/opcodes/ChangeLog
+++ b/opcodes/ChangeLog
@@ -1,5 +1,11 @@
2018-10-03 Tamar Christina <tamar.christina@arm.com>
+ * aarch64-opc.c (init_insn_block): New.
+ (verify_constraints, aarch64_is_destructive_by_operands): New.
+ * aarch64-opc.h (verify_constraints): New.
+
+2018-10-03 Tamar Christina <tamar.christina@arm.com>
+
* aarch64-dis.c (aarch64_opcode_decode): Update verifier call.
* aarch64-opc.c (verify_ldpsw): Update arguments.
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index f35f0d6..c28cac4 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -832,6 +832,25 @@ dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
}
#endif /* DEBUG_AARCH64 */
+/* This function checks if the instruction described by OPCODE is a
+ destructive instruction based on the usage of its operands.  It does not
+ recognize unary destructive instructions. */
+bfd_boolean
+aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
+{
+ int i = 0;
+ const enum aarch64_opnd *opnds = opcode->operands;
+
+ if (opnds[0] == AARCH64_OPND_NIL)
+ return FALSE;
+
+ while (opnds[++i] != AARCH64_OPND_NIL)
+ if (opnds[i] == opnds[0])
+ return TRUE;
+
+ return FALSE;
+}
+
/* TODO improve this, we can have an extra field at the runtime to
store the number of operands rather than calculating it every time. */
@@ -4491,7 +4510,7 @@ verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
bfd_boolean encoding ATTRIBUTE_UNUSED,
aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
- aarch64_instr_sequence *insn_block ATTRIBUTE_UNUSED)
+ aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
int t = BITS (insn, 4, 0);
int n = BITS (insn, 9, 5);
@@ -4514,6 +4533,334 @@ verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
return ERR_OK;
}
+/* Initialize an instruction sequence insn_sequence with the instruction INST.
+ If INST is NULL the given insn_sequence is cleared and the sequence is left
+ uninitialized. */
+
+void
+init_insn_sequence (const struct aarch64_inst *inst,
+ aarch64_instr_sequence *insn_sequence)
+{
+ int num_req_entries = 0;
+ insn_sequence->next_insn = 0;
+ insn_sequence->num_insns = num_req_entries;
+ if (insn_sequence->instr)
+ XDELETE (insn_sequence->instr);
+ insn_sequence->instr = NULL;
+
+ if (inst)
+ {
+ insn_sequence->instr = XNEW (aarch64_inst);
+ memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
+ }
+
+ /* Handle all the cases here. May need to think of something smarter than
+ a giant if/else chain if this grows. At that time, a lookup table may be
+ best. */
+ if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
+ num_req_entries = 1;
+
+ if (insn_sequence->current_insns)
+ XDELETEVEC (insn_sequence->current_insns);
+ insn_sequence->current_insns = NULL;
+
+ if (num_req_entries != 0)
+ {
+ size_t size = num_req_entries * sizeof (aarch64_inst);
+ insn_sequence->current_insns
+ = (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
+ memset (insn_sequence->current_insns, 0, size);
+ }
+}
+
+
+/* This function verifies that the instruction INST adheres to its specified
+ constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
+ returned and MISMATCH_DETAIL contains the reason why verification failed.
+
+ The function is called both during assembly and disassembly.  If assembling
+ then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
+ and will contain the PC of the current instruction w.r.t. the section.
+
+ If not ENCODING and PC is 0 then you are at the start of a section.  The
+ constraints are verified against the given state INSN_SEQUENCE, which is
+ updated as it transitions through the verification. */
+
+enum err_type
+verify_constraints (const struct aarch64_inst *inst,
+ const aarch64_insn insn ATTRIBUTE_UNUSED,
+ bfd_vma pc,
+ bfd_boolean encoding,
+ aarch64_operand_error *mismatch_detail,
+ aarch64_instr_sequence *insn_sequence)
+{
+ assert (inst);
+ assert (inst->opcode);
+
+ const struct aarch64_opcode *opcode = inst->opcode;
+ if (!opcode->constraints && !insn_sequence->instr)
+ return ERR_OK;
+
+ assert (insn_sequence);
+
+ enum err_type res = ERR_OK;
+
+ /* This instruction puts a constraint on the insn_sequence. */
+ if (opcode->flags & F_SCAN)
+ {
+ if (insn_sequence->instr)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("instruction opens new dependency "
+ "sequence without ending previous one");
+ mismatch_detail->index = -1;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ }
+
+ init_insn_sequence (inst, insn_sequence);
+ return res;
+ }
+
+ /* Verify constraints on an existing sequence. */
+ if (insn_sequence->instr)
+ {
+ const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
+ /* If we're decoding and we hit PC=0 with an open sequence then we haven't
+ closed a previous one that we should have. */
+ if (!encoding && pc == 0)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("previous `movprfx' sequence not closed");
+ mismatch_detail->index = -1;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ /* Reset the sequence. */
+ init_insn_sequence (NULL, insn_sequence);
+ return res;
+ }
+
+ /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
+ if (inst_opcode->constraints & C_SCAN_MOVPRFX)
+ {
+ /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
+ instruction for better error messages. */
+ if (!opcode->avariant || !(*opcode->avariant & AARCH64_FEATURE_SVE))
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("SVE instruction expected after "
+ "`movprfx'");
+ mismatch_detail->index = -1;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+
+ /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
+ instruction that is allowed to be used with a MOVPRFX. */
+ if (!(opcode->constraints & C_SCAN_MOVPRFX))
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("SVE `movprfx' compatible instruction "
+ "expected");
+ mismatch_detail->index = -1;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+
+ /* Next check for usage of the predicate register. */
+ aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
+ aarch64_opnd_info blk_pred = {0}, inst_pred = {0};
+ bfd_boolean predicated = FALSE;
+ assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
+
+ /* Determine if the movprfx instruction used is predicated or not. */
+ if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
+ {
+ predicated = TRUE;
+ blk_pred = insn_sequence->instr->operands[1];
+ }
+
+ unsigned char max_elem_size = 0;
+ unsigned char current_elem_size;
+ int num_op_used = 0, last_op_usage = 0;
+ int i, inst_pred_idx = -1;
+ int num_ops = aarch64_num_of_operands (opcode);
+ for (i = 0; i < num_ops; i++)
+ {
+ aarch64_opnd_info inst_op = inst->operands[i];
+ switch (inst_op.type)
+ {
+ case AARCH64_OPND_SVE_Zd:
+ case AARCH64_OPND_SVE_Zm_5:
+ case AARCH64_OPND_SVE_Zm_16:
+ case AARCH64_OPND_SVE_Zn:
+ case AARCH64_OPND_SVE_Zt:
+ case AARCH64_OPND_SVE_Vm:
+ case AARCH64_OPND_SVE_Vn:
+ case AARCH64_OPND_Va:
+ case AARCH64_OPND_Vn:
+ case AARCH64_OPND_Vm:
+ case AARCH64_OPND_Sn:
+ case AARCH64_OPND_Sm:
+ case AARCH64_OPND_Rn:
+ case AARCH64_OPND_Rm:
+ case AARCH64_OPND_Rn_SP:
+ case AARCH64_OPND_Rm_SP:
+ if (inst_op.reg.regno == blk_dest.reg.regno)
+ {
+ num_op_used++;
+ last_op_usage = i;
+ }
+ current_elem_size
+ = aarch64_get_qualifier_esize (inst_op.qualifier);
+ if (current_elem_size > max_elem_size)
+ max_elem_size = current_elem_size;
+ break;
+ case AARCH64_OPND_SVE_Pd:
+ case AARCH64_OPND_SVE_Pg3:
+ case AARCH64_OPND_SVE_Pg4_5:
+ case AARCH64_OPND_SVE_Pg4_10:
+ case AARCH64_OPND_SVE_Pg4_16:
+ case AARCH64_OPND_SVE_Pm:
+ case AARCH64_OPND_SVE_Pn:
+ case AARCH64_OPND_SVE_Pt:
+ inst_pred = inst_op;
+ inst_pred_idx = i;
+ break;
+ default:
+ break;
+ }
+ }
+
+ assert (max_elem_size != 0);
+ aarch64_opnd_info inst_dest = inst->operands[0];
+ /* Determine the size that should be used to compare against the
+ movprfx size. */
+ current_elem_size
+ = opcode->constraints & C_MAX_ELEM
+ ? max_elem_size
+ : aarch64_get_qualifier_esize (inst_dest.qualifier);
+
+ /* If movprfx is predicated do some extra checks. */
+ if (predicated)
+ {
+ /* The instruction must be predicated. */
+ if (inst_pred_idx < 0)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("predicated instruction expected "
+ "after `movprfx'");
+ mismatch_detail->index = -1;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+
+ /* The instruction must have a merging predicate. */
+ if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("merging predicate expected due "
+ "to preceding `movprfx'");
+ mismatch_detail->index = inst_pred_idx;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+
+ /* The same register must be used in instruction. */
+ if (blk_pred.reg.regno != inst_pred.reg.regno)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("predicate register differs "
+ "from that in preceding "
+ "`movprfx'");
+ mismatch_detail->index = inst_pred_idx;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+ }
+
+ /* Destructive operations by definition must allow one usage of the
+ same register. */
+ int allowed_usage
+ = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;
+
+ /* Operand is not used at all. */
+ if (num_op_used == 0)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("output register of preceding "
+ "`movprfx' not used in current "
+ "instruction");
+ mismatch_detail->index = 0;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+
+ /* We now know it's used, now determine exactly where it's used. */
+ if (blk_dest.reg.regno != inst_dest.reg.regno)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("output register of preceding "
+ "`movprfx' expected as output");
+ mismatch_detail->index = 0;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+
+ /* Operand used more than allowed for the specific opcode type. */
+ if (num_op_used > allowed_usage)
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("output register of preceding "
+ "`movprfx' used as input");
+ mismatch_detail->index = last_op_usage;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+
+ /* Now the only thing left is the qualifiers checks. The register
+ must have the same maximum element size. */
+ if (inst_dest.qualifier
+ && blk_dest.qualifier
+ && current_elem_size
+ != aarch64_get_qualifier_esize (blk_dest.qualifier))
+ {
+ mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
+ mismatch_detail->error = _("register size not compatible with "
+ "previous `movprfx'");
+ mismatch_detail->index = 0;
+ mismatch_detail->non_fatal = TRUE;
+ res = ERR_VFI;
+ goto done;
+ }
+ }
+
+done:
+ /* Add the new instruction to the sequence. */
+ memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
+ inst, sizeof (aarch64_inst));
+
+ /* Check if sequence is now full. */
+ if (insn_sequence->next_insn >= insn_sequence->num_insns)
+ {
+ /* Sequence is full, but we don't have anything special to do for now,
+ so clear and reset it. */
+ init_insn_sequence (NULL, insn_sequence);
+ }
+ }
+
+ return res;
+}
+
+
/* Return true if VALUE cannot be moved into an SVE register using DUP
(with any element size, not just ESIZE) and if using DUPM would
therefore be OK. ESIZE is the number of bytes in the immediate. */
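For the disassembly side, here is a minimal sketch of how a caller might thread
an instruction stream through verify_constraints, again assuming the internal
opcodes/aarch64-opc.h declaration plus <string.h> and <stdio.h> are visible.  A
zero-initialised SEQ would be reset with init_insn_sequence (NULL, seq) at the
start of each section; in-tree the call is made through the opcode verifier
hook rather than directly.

/* Minimal sketch, not part of the patch: decode the word at PC and check
   it against the running movprfx sequence SEQ.  */
static void
decode_and_verify (aarch64_insn word, bfd_vma pc, aarch64_instr_sequence *seq)
{
  aarch64_inst inst;
  aarch64_operand_error detail;
  memset (&inst, 0, sizeof inst);
  memset (&detail, 0, sizeof detail);

  if (aarch64_decode_insn (word, &inst, FALSE, &detail) != ERR_OK)
    return;  /* Undefined or unpredictable encoding; nothing to verify.  */

  /* ENCODING is FALSE here, so PC is meaningful and PC == 0 marks the
     start of a section.  A constraint violation comes back as ERR_VFI
     with a non-fatal diagnostic, printed as a note rather than aborting
     the disassembly.  */
  if (verify_constraints (&inst, word, pc, FALSE, &detail, seq) == ERR_VFI
      && detail.non_fatal)
    fprintf (stderr, "note: %s\n", detail.error);
}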
diff --git a/opcodes/aarch64-opc.h b/opcodes/aarch64-opc.h
index f741dea..0686493 100644
--- a/opcodes/aarch64-opc.h
+++ b/opcodes/aarch64-opc.h
@@ -183,6 +183,10 @@ typedef struct aarch64_operand aarch64_operand;
extern const aarch64_operand aarch64_operands[];
+enum err_type
+verify_constraints (const struct aarch64_inst *, const aarch64_insn, bfd_vma,
+ bfd_boolean, aarch64_operand_error *, aarch64_instr_sequence*);
+
/* Operand flags. */
#define OPD_F_HAS_INSERTER 0x00000001