Diffstat (limited to 'opcodes/aarch64-opc.c')
-rw-r--r--  opcodes/aarch64-opc.c  136
1 file changed, 68 insertions, 68 deletions
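
This patch converts opcodes/aarch64-opc.c from BFD's bfd_boolean type and its TRUE/FALSE macros to C99 bool/true/false. The replacement is mechanical: return types, parameters, locals and constants change name while the logic is untouched. A minimal sketch of the pattern, assuming a <stdbool.h>-based setup and using a hypothetical helper rather than code from this file:

    #include <stdbool.h>

    /* Before the conversion such a predicate would have read:
         static bfd_boolean reg_is_sp (unsigned regno)
         { return regno == 31 ? TRUE : FALSE; }  */

    /* After the conversion: same logic, standard C99 bool.  */
    static bool
    reg_is_sp (unsigned regno)
    {
      return regno == 31;
    }
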
diff --git a/opcodes/aarch64-opc.c b/opcodes/aarch64-opc.c
index 1f04aad..79b37bf 100644
--- a/opcodes/aarch64-opc.c
+++ b/opcodes/aarch64-opc.c
@@ -32,7 +32,7 @@
#include "aarch64-opc.h"
#ifdef DEBUG_AARCH64
-int debug_dump = FALSE;
+int debug_dump = false;
#endif /* DEBUG_AARCH64 */
/* The enumeration strings associated with each value of a 5-bit SVE
@@ -102,14 +102,14 @@ const char *const aarch64_sve_prfop_array[16] = {
/* Helper functions to determine which operand to be used to encode/decode
the size:Q fields for AdvSIMD instructions. */
-static inline bfd_boolean
+static inline bool
vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
{
return (qualifier >= AARCH64_OPND_QLF_V_8B
&& qualifier <= AARCH64_OPND_QLF_V_1Q);
}
-static inline bfd_boolean
+static inline bool
fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
{
return (qualifier >= AARCH64_OPND_QLF_S_B
@@ -423,7 +423,7 @@ aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
enum aarch64_modifier_kind
aarch64_get_operand_modifier_from_value (aarch64_insn value,
- bfd_boolean extend_p)
+ bool extend_p)
{
if (extend_p)
return AARCH64_MOD_UXTB + value;
@@ -431,13 +431,13 @@ aarch64_get_operand_modifier_from_value (aarch64_insn value,
return AARCH64_MOD_LSL - value;
}
-bfd_boolean
+bool
aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
{
return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
}
-static inline bfd_boolean
+static inline bool
aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
{
return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
@@ -760,13 +760,13 @@ struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{0, 0, 0, "retrieving", 0},
};
-static inline bfd_boolean
+static inline bool
operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
{
return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
}
-static inline bfd_boolean
+static inline bool
qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
{
return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
@@ -856,20 +856,20 @@ dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
/* This function checks if the given instruction INSN is a destructive
instruction based on the usage of the registers. It does not recognize
unary destructive instructions. */
-bfd_boolean
+bool
aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
{
int i = 0;
const enum aarch64_opnd *opnds = opcode->operands;
if (opnds[0] == AARCH64_OPND_NIL)
- return FALSE;
+ return false;
while (opnds[++i] != AARCH64_OPND_NIL)
if (opnds[i] == opnds[0])
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
/* TODO improve this, we can have an extra field at the runtime to
@@ -1021,7 +1021,7 @@ aarch64_find_best_match (const aarch64_inst *inst,
succeeds. */
static int
-match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
+match_operands_qualifier (aarch64_inst *inst, bool update_p)
{
int i, nops;
aarch64_opnd_qualifier_seq_t qualifiers;
@@ -1039,7 +1039,7 @@ match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
nops = aarch64_num_of_operands (inst->opcode);
for (i = 0; i < nops; ++i)
if (inst->operands[i].qualifier != qualifiers[i])
- return FALSE;
+ return false;
}
/* Update the qualifiers. */
@@ -1066,7 +1066,7 @@ match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
amount will be returned in *SHIFT_AMOUNT. */
-bfd_boolean
+bool
aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
{
int amount;
@@ -1080,7 +1080,7 @@ aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
permitted. */
if (value >> 32 != 0 && value >> 32 != 0xffffffff)
/* Immediate out of range. */
- return FALSE;
+ return false;
value &= 0xffffffff;
}
@@ -1097,16 +1097,16 @@ aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
if (amount == -1)
{
- DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
- return FALSE;
+ DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
+ return false;
}
if (shift_amount != NULL)
*shift_amount = amount;
- DEBUG_TRACE ("exit TRUE with amount %d", amount);
+ DEBUG_TRACE ("exit true with amount %d", amount);
- return TRUE;
+ return true;
}
/* Build the accepted values for immediate logical SIMD instructions.
@@ -1239,12 +1239,12 @@ build_immediate_table (void)
If ENCODING is not NULL, on the return of TRUE, the standard encoding for
VALUE will be returned in *ENCODING. */
-bfd_boolean
+bool
aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
{
simd_imm_encoding imm_enc;
const simd_imm_encoding *imm_encoding;
- static bfd_boolean initialized = FALSE;
+ static bool initialized = false;
uint64_t upper;
int i;
@@ -1254,14 +1254,14 @@ aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
if (!initialized)
{
build_immediate_table ();
- initialized = TRUE;
+ initialized = true;
}
/* Allow all zeros or all ones in top bits, so that
constant expressions like ~1 are permitted. */
upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
if ((value & ~upper) != value && (value | upper) != value)
- return FALSE;
+ return false;
/* Replicate to a full 64-bit value. */
value &= ~upper;
@@ -1274,13 +1274,13 @@ aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
if (imm_encoding == NULL)
{
- DEBUG_TRACE ("exit with FALSE");
- return FALSE;
+ DEBUG_TRACE ("exit with false");
+ return false;
}
if (encoding != NULL)
*encoding = imm_encoding->encoding;
- DEBUG_TRACE ("exit with TRUE");
- return TRUE;
+ DEBUG_TRACE ("exit with true");
+ return true;
}
/* If 64-bit immediate IMM is in the format of
@@ -2768,7 +2768,7 @@ aarch64_match_operands_constraint (aarch64_inst *inst,
constraint checking will carried out by operand_general_constraint_met_p,
which has be to called after this in order to get all of the operands'
qualifiers established. */
- if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
+ if (match_operands_qualifier (inst, true /* update_p */) == 0)
{
DEBUG_TRACE ("FAIL on operand qualifier matching");
if (mismatch_detail)
@@ -3099,8 +3099,8 @@ print_register_offset_address (char *buf, size_t size,
const char *base, const char *offset)
{
char tb[16]; /* Temporary buffer. */
- bfd_boolean print_extend_p = TRUE;
- bfd_boolean print_amount_p = TRUE;
+ bool print_extend_p = true;
+ bool print_amount_p = true;
const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
@@ -3108,11 +3108,11 @@ print_register_offset_address (char *buf, size_t size,
{
/* Not print the shift/extend amount when the amount is zero and
when it is not the special case of 8-bit load/store instruction. */
- print_amount_p = FALSE;
+ print_amount_p = false;
/* Likewise, no need to print the shift operator LSL in such a
situation. */
if (opnd->shifter.kind == AARCH64_MOD_LSL)
- print_extend_p = FALSE;
+ print_extend_p = false;
}
/* Prepare for the extend/shift. */
@@ -3695,7 +3695,7 @@ aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
{
const aarch64_sys_reg *sr = aarch64_sys_regs + i;
- bfd_boolean exact_match
+ bool exact_match
= (!(sr->flags & (F_REG_READ | F_REG_WRITE))
|| (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
&& AARCH64_CPU_HAS_FEATURE (features, sr->features);
@@ -4685,7 +4685,7 @@ const aarch64_sys_reg aarch64_sys_regs [] =
{ 0, CPENC (0,0,0,0,0), 0, 0 }
};
-bfd_boolean
+bool
aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
{
return (reg_flags & F_DEPRECATED) != 0;
@@ -4711,12 +4711,12 @@ const aarch64_sys_reg aarch64_pstatefields [] =
{ 0, CPENC (0,0,0,0,0), 0, 0 },
};
-bfd_boolean
+bool
aarch64_pstatefield_supported_p (const aarch64_feature_set features,
const aarch64_sys_reg *reg)
{
if (!(reg->flags & F_ARCHEXT))
- return TRUE;
+ return true;
return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
}
@@ -4878,13 +4878,13 @@ const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{ 0, CPENS(0,0,0,0), 0 }
};
-bfd_boolean
+bool
aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
{
return (sys_ins_reg->flags & F_HASXT) != 0;
}
-extern bfd_boolean
+extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
const char *reg_name,
aarch64_insn reg_value,
@@ -4896,15 +4896,15 @@ aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
{
const char *suffix = strrchr (reg_name, '_');
if (suffix && !strcmp (suffix, "_el3"))
- return FALSE;
+ return false;
}
if (!(reg_flags & F_ARCHEXT))
- return TRUE;
+ return true;
if (reg_features
&& AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
- return TRUE;
+ return true;
/* ARMv8.4 TLB instructions. */
if ((reg_value == CPENS (0, C8, C1, 0)
@@ -4954,17 +4954,17 @@ aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
|| reg_value == CPENS (6, C8, C5, 1)
|| reg_value == CPENS (6, C8, C5, 5))
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
- return TRUE;
+ return true;
/* DC CVAP. Values are from aarch64_sys_regs_dc. */
if (reg_value == CPENS (3, C7, C12, 1)
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
- return TRUE;
+ return true;
/* DC CVADP. Values are from aarch64_sys_regs_dc. */
if (reg_value == CPENS (3, C7, C13, 1)
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
- return TRUE;
+ return true;
/* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
if ((reg_value == CPENS (0, C7, C6, 3)
@@ -4986,20 +4986,20 @@ aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
|| reg_value == CPENS (3, C7, C14, 5)
|| reg_value == CPENS (3, C7, C4, 4))
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
- return TRUE;
+ return true;
/* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
if ((reg_value == CPENS (0, C7, C9, 0)
|| reg_value == CPENS (0, C7, C9, 1))
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
- return TRUE;
+ return true;
/* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
if (reg_value == CPENS (3, C7, C3, 0)
&& AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
#undef C0
@@ -5025,7 +5025,7 @@ aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
static enum err_type
verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
- bfd_boolean encoding ATTRIBUTE_UNUSED,
+ bool encoding ATTRIBUTE_UNUSED,
aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
@@ -5055,7 +5055,7 @@ verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
static enum err_type
verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
- bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
+ bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
@@ -5130,7 +5130,7 @@ enum err_type
verify_constraints (const struct aarch64_inst *inst,
const aarch64_insn insn ATTRIBUTE_UNUSED,
bfd_vma pc,
- bfd_boolean encoding,
+ bool encoding,
aarch64_operand_error *mismatch_detail,
aarch64_instr_sequence *insn_sequence)
{
@@ -5154,7 +5154,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->error = _("instruction opens new dependency "
"sequence without ending previous one");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
}
@@ -5173,7 +5173,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
mismatch_detail->error = _("previous `movprfx' sequence not closed");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
/* Reset the sequence. */
init_insn_sequence (NULL, insn_sequence);
@@ -5193,7 +5193,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->error = _("SVE instruction expected after "
"`movprfx'");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5206,7 +5206,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->error = _("SVE `movprfx' compatible instruction "
"expected");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5216,13 +5216,13 @@ verify_constraints (const struct aarch64_inst *inst,
aarch64_opnd_info blk_pred, inst_pred;
memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
- bfd_boolean predicated = FALSE;
+ bool predicated = false;
assert (blk_dest.type == AARCH64_OPND_SVE_Zd);
/* Determine if the movprfx instruction used is predicated or not. */
if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
{
- predicated = TRUE;
+ predicated = true;
blk_pred = insn_sequence->instr->operands[1];
}
@@ -5293,7 +5293,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->error = _("predicated instruction expected "
"after `movprfx'");
mismatch_detail->index = -1;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5305,7 +5305,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->error = _("merging predicate expected due "
"to preceding `movprfx'");
mismatch_detail->index = inst_pred_idx;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5318,7 +5318,7 @@ verify_constraints (const struct aarch64_inst *inst,
"from that in preceding "
"`movprfx'");
mismatch_detail->index = inst_pred_idx;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5337,7 +5337,7 @@ verify_constraints (const struct aarch64_inst *inst,
"`movprfx' not used in current "
"instruction");
mismatch_detail->index = 0;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5349,7 +5349,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->error = _("output register of preceding "
"`movprfx' expected as output");
mismatch_detail->index = 0;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5361,7 +5361,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->error = _("output register of preceding "
"`movprfx' used as input");
mismatch_detail->index = last_op_usage;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5377,7 +5377,7 @@ verify_constraints (const struct aarch64_inst *inst,
mismatch_detail->error = _("register size not compatible with "
"previous `movprfx'");
mismatch_detail->index = 0;
- mismatch_detail->non_fatal = TRUE;
+ mismatch_detail->non_fatal = true;
res = ERR_VFI;
goto done;
}
@@ -5405,14 +5405,14 @@ verify_constraints (const struct aarch64_inst *inst,
(with any element size, not just ESIZE) and if using DUPM would
therefore be OK. ESIZE is the number of bytes in the immediate. */
-bfd_boolean
+bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
int64_t svalue = uvalue;
uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
- return FALSE;
+ return false;
if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
{
svalue = (int32_t) uvalue;
@@ -5420,7 +5420,7 @@ aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
svalue = (int16_t) uvalue;
if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
- return FALSE;
+ return false;
}
}
if ((svalue & 0xff) == 0)