aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNick Clifton <nickc@redhat.com>2016-03-29 11:34:22 +0100
committerNick Clifton <nickc@redhat.com>2016-03-29 11:34:22 +0100
commitef0d8ffc45aa32ed1e49051a344fa6c8cff583f4 (patch)
treeda9a4b43a0f54d2c8da0fd3fd17eee6afd5a4b98
parented3056ebdb9795446157af03d3e08fbb93c1b01d (diff)
downloadgdb-ef0d8ffc45aa32ed1e49051a344fa6c8cff583f4.zip
gdb-ef0d8ffc45aa32ed1e49051a344fa6c8cff583f4.tar.gz
gdb-ef0d8ffc45aa32ed1e49051a344fa6c8cff583f4.tar.bz2
Tidy up AArch64 simulator code.
* cpustate.c: Remove space after asterisk in function parameters.
* decode.h (greg): Delete unused function.
(vreg, shift, extension, scaling, writeback, condcode): Likewise.
* simulator.c: Use INSTR macro in more places.
(HALT_NYI): Use sim_io_eprintf in place of fprintf.
Remove extraneous whitespace.
-rw-r--r--sim/aarch64/ChangeLog9
-rw-r--r--sim/aarch64/cpustate.c14
-rw-r--r--sim/aarch64/decode.h44
-rw-r--r--sim/aarch64/simulator.c2815
4 files changed, 1413 insertions, 1469 deletions
diff --git a/sim/aarch64/ChangeLog b/sim/aarch64/ChangeLog
index 58ef3af..b9b7a2f 100644
--- a/sim/aarch64/ChangeLog
+++ b/sim/aarch64/ChangeLog
@@ -1,3 +1,12 @@
+2016-03-29 Nick Clifton <nickc@redhat.com>
+
+ * cpustate.c: Remove space after asterisk in function parameters.
+ * decode.h (greg): Delete unused function.
+ (vreg, shift, extension, scaling, writeback, condcode): Likewise.
+ * simulator.c: Use INSTR macro in more places.
+ (HALT_NYI): Use sim_io_eprintf in place of fprintf.
+ Remove extraneous whitespace.
+
2016-03-23 Nick Clifton <nickc@redhat.com>
* cpustate.c (aarch64_get_FP_half): New function. Read a vector
diff --git a/sim/aarch64/cpustate.c b/sim/aarch64/cpustate.c
index 0656af5..4451b5d 100644
--- a/sim/aarch64/cpustate.c
+++ b/sim/aarch64/cpustate.c
@@ -470,25 +470,25 @@ aarch64_get_vec_double (sim_cpu *cpu, VReg reg, unsigned element)
while (0)
void
-aarch64_set_vec_u64 (sim_cpu * cpu, VReg reg, unsigned element, uint64_t val)
+aarch64_set_vec_u64 (sim_cpu *cpu, VReg reg, unsigned element, uint64_t val)
{
SET_VEC_ELEMENT (reg, element, val, v, "%16lx");
}
void
-aarch64_set_vec_u32 (sim_cpu * cpu, VReg reg, unsigned element, uint32_t val)
+aarch64_set_vec_u32 (sim_cpu *cpu, VReg reg, unsigned element, uint32_t val)
{
SET_VEC_ELEMENT (reg, element, val, w, "%8x");
}
void
-aarch64_set_vec_u16 (sim_cpu * cpu, VReg reg, unsigned element, uint16_t val)
+aarch64_set_vec_u16 (sim_cpu *cpu, VReg reg, unsigned element, uint16_t val)
{
SET_VEC_ELEMENT (reg, element, val, h, "%4x");
}
void
-aarch64_set_vec_u8 (sim_cpu * cpu, VReg reg, unsigned element, uint8_t val)
+aarch64_set_vec_u8 (sim_cpu *cpu, VReg reg, unsigned element, uint8_t val)
{
SET_VEC_ELEMENT (reg, element, val, b, "%x");
}
@@ -573,19 +573,19 @@ aarch64_test_FPSR_bit (sim_cpu *cpu, FPSRMask flag)
}
uint64_t
-aarch64_get_thread_id (sim_cpu * cpu)
+aarch64_get_thread_id (sim_cpu *cpu)
{
return cpu->tpidr;
}
uint32_t
-aarch64_get_FPCR (sim_cpu * cpu)
+aarch64_get_FPCR (sim_cpu *cpu)
{
return cpu->FPCR;
}
void
-aarch64_set_FPCR (sim_cpu * cpu, uint32_t val)
+aarch64_set_FPCR (sim_cpu *cpu, uint32_t val)
{
if (cpu->FPCR != val)
TRACE_REGISTER (cpu,
diff --git a/sim/aarch64/decode.h b/sim/aarch64/decode.h
index 49bfa9e..745b035 100644
--- a/sim/aarch64/decode.h
+++ b/sim/aarch64/decode.h
@@ -177,20 +177,6 @@ pickbits64 (uint64_t val, int hi, int lo)
return pick64 (val, hi, lo) >> lo;
}
-/* Decode registers, immediates and constants of various types. */
-
-static inline GReg
-greg (uint32_t val, int lo)
-{
- return (GReg) pickbits32 (val, lo + 4, lo);
-}
-
-static inline VReg
-vreg (uint32_t val, int lo)
-{
- return (VReg) pickbits32 (val, lo + 4, lo);
-}
-
static inline uint32_t
uimm (uint32_t val, int hi, int lo)
{
@@ -223,36 +209,6 @@ simm64 (uint64_t val, int hi, int lo)
return x.n >> (63 - hi + lo);
}
-static inline Shift
-shift (uint32_t val, int lo)
-{
- return (Shift) pickbits32 (val, lo + 1, lo);
-}
-
-static inline Extension
-extension (uint32_t val, int lo)
-{
- return (Extension) pickbits32 (val, lo + 2, lo);
-}
-
-static inline Scaling
-scaling (uint32_t val, int lo)
-{
- return (Scaling) pickbits32 (val, lo, lo);
-}
-
-static inline WriteBack
-writeback (uint32_t val, int lo)
-{
- return (WriteBack) pickbits32 (val, lo, lo);
-}
-
-static inline CondCode
-condcode (uint32_t val, int lo)
-{
- return (CondCode) pickbits32 (val, lo + 3, lo);
-}
-
/* Operation decode.
Bits [28,24] are the primary dispatch vector. */
diff --git a/sim/aarch64/simulator.c b/sim/aarch64/simulator.c
index a0231fd..1695174 100644
--- a/sim/aarch64/simulator.c
+++ b/sim/aarch64/simulator.c
@@ -39,6 +39,9 @@
#define IS_SET(_X) (TST (( _X )) ? 1 : 0)
#define IS_CLEAR(_X) (TST (( _X )) ? 0 : 1)
+/* Space saver macro. */
+#define INSTR(HIGH, LOW) uimm (aarch64_get_instr (cpu), (HIGH), (LOW))
+
#define HALT_UNALLOC \
do \
{ \
@@ -62,7 +65,7 @@
__LINE__, aarch64_get_PC (cpu)); \
if (! TRACE_ANY_P (cpu)) \
{ \
- fprintf (stderr, "SIM Error: Unimplemented instruction: "); \
+ sim_io_eprintf (CPU_STATE (cpu), "SIM Error: Unimplemented instruction: "); \
trace_disasm (CPU_STATE (cpu), cpu, aarch64_get_PC (cpu)); \
} \
sim_engine_halt (CPU_STATE (cpu), cpu, NULL, aarch64_get_PC (cpu),\
@@ -73,14 +76,11 @@
#define NYI_assert(HI, LO, EXPECTED) \
do \
{ \
- if (uimm (aarch64_get_instr (cpu), (HI), (LO)) != (EXPECTED)) \
+ if (INSTR ((HI), (LO)) != (EXPECTED)) \
HALT_NYI; \
} \
while (0)
-/* Space saver macro. */
-#define INSTR(HIGH, LOW) uimm (aarch64_get_instr (cpu), (HIGH), (LOW))
-
/* Helper functions used by expandLogicalImmediate. */
/* for i = 1, ... N result<i-1> = 1 other bits are zero */
@@ -179,7 +179,7 @@ dexNotify (sim_cpu *cpu)
{
/* instr[14,0] == type : 0 ==> method entry, 1 ==> method reentry
2 ==> exit Java, 3 ==> start next bytecode. */
- uint32_t type = uimm (aarch64_get_instr (cpu), 14, 0);
+ uint32_t type = INSTR (14, 0);
TRACE_EVENTS (cpu, "Notify Insn encountered, type = 0x%x", type);
@@ -234,7 +234,7 @@ dexPseudo (sim_cpu *cpu)
sim_stopped, SIM_SIGTRAP);
}
- dispatch = uimm (aarch64_get_instr (cpu), 31, 15);
+ dispatch = INSTR (31, 15);
/* We do not handle callouts at the moment. */
if (dispatch == PSEUDO_CALLOUT || dispatch == PSEUDO_CALLOUTR)
@@ -262,8 +262,8 @@ dexPseudo (sim_cpu *cpu)
static void
ldur32 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -274,8 +274,8 @@ ldur32 (sim_cpu *cpu, int32_t offset)
static void
ldur64 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -286,8 +286,8 @@ ldur64 (sim_cpu *cpu, int32_t offset)
static void
ldurb32 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u8
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -298,8 +298,8 @@ ldurb32 (sim_cpu *cpu, int32_t offset)
static void
ldursb32 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rt, NO_SP, (uint32_t) aarch64_get_mem_s8
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -310,8 +310,8 @@ ldursb32 (sim_cpu *cpu, int32_t offset)
static void
ldursb64 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s8
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -322,8 +322,8 @@ ldursb64 (sim_cpu *cpu, int32_t offset)
static void
ldurh32 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_mem_u16
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -334,8 +334,8 @@ ldurh32 (sim_cpu *cpu, int32_t offset)
static void
ldursh32 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) aarch64_get_mem_s16
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -346,8 +346,8 @@ ldursh32 (sim_cpu *cpu, int32_t offset)
static void
ldursh64 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s16
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -358,8 +358,8 @@ ldursh64 (sim_cpu *cpu, int32_t offset)
static void
ldursw (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) aarch64_get_mem_s32
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -373,8 +373,8 @@ ldursw (sim_cpu *cpu, int32_t offset)
static void
stur32 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_mem_u32 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
@@ -385,8 +385,8 @@ stur32 (sim_cpu *cpu, int32_t offset)
static void
stur64 (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_mem_u64 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
@@ -397,8 +397,8 @@ stur64 (sim_cpu *cpu, int32_t offset)
static void
sturb (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_mem_u8 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
@@ -409,8 +409,8 @@ sturb (sim_cpu *cpu, int32_t offset)
static void
sturh (sim_cpu *cpu, int32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_mem_u16 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
@@ -425,7 +425,7 @@ sturh (sim_cpu *cpu, int32_t offset)
static void
ldr32_pcrel (sim_cpu *cpu, int32_t offset)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_mem_u32
@@ -436,7 +436,7 @@ ldr32_pcrel (sim_cpu *cpu, int32_t offset)
static void
ldr_pcrel (sim_cpu *cpu, int32_t offset)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_mem_u64
@@ -447,7 +447,7 @@ ldr_pcrel (sim_cpu *cpu, int32_t offset)
static void
ldrsw_pcrel (sim_cpu *cpu, int32_t offset)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_mem_s32
@@ -458,7 +458,7 @@ ldrsw_pcrel (sim_cpu *cpu, int32_t offset)
static void
fldrs_pcrel (sim_cpu *cpu, int32_t offset)
{
- unsigned int rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rd = INSTR (4, 0);
aarch64_set_vec_u32 (cpu, rd, 0,
aarch64_get_mem_u32
@@ -469,7 +469,7 @@ fldrs_pcrel (sim_cpu *cpu, int32_t offset)
static void
fldrd_pcrel (sim_cpu *cpu, int32_t offset)
{
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int st = INSTR (4, 0);
aarch64_set_vec_u64 (cpu, st, 0,
aarch64_get_mem_u64
@@ -480,7 +480,7 @@ fldrd_pcrel (sim_cpu *cpu, int32_t offset)
static void
fldrq_pcrel (sim_cpu *cpu, int32_t offset)
{
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int st = INSTR (4, 0);
uint64_t addr = aarch64_get_PC (cpu) + offset * 4;
FRegister a;
@@ -538,8 +538,8 @@ extend (uint32_t value, Extension extension)
static void
fldrs_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
if (wb != Post)
@@ -557,8 +557,8 @@ fldrs_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
fldrb_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset;
aarch64_set_vec_u8 (cpu, rd, 0, aarch64_get_mem_u32 (cpu, addr));
@@ -568,8 +568,8 @@ fldrb_abs (sim_cpu *cpu, uint32_t offset)
static void
fldrh_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 16);
aarch64_set_vec_u16 (cpu, rd, 0, aarch64_get_mem_u16 (cpu, addr));
@@ -579,8 +579,8 @@ fldrh_abs (sim_cpu *cpu, uint32_t offset)
static void
fldrs_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 32);
aarch64_set_vec_u32 (cpu, rd, 0, aarch64_get_mem_u32 (cpu, addr));
@@ -590,8 +590,8 @@ fldrs_abs (sim_cpu *cpu, uint32_t offset)
static void
fldrd_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 64);
aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_mem_u64 (cpu, addr));
@@ -601,8 +601,8 @@ fldrd_abs (sim_cpu *cpu, uint32_t offset)
static void
fldrq_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + SCALE (offset, 128);
aarch64_set_vec_u64 (cpu, rd, 0, aarch64_get_mem_u64 (cpu, addr));
@@ -614,9 +614,9 @@ fldrq_abs (sim_cpu *cpu, uint32_t offset)
static void
fldrs_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 32, scaling);
@@ -629,8 +629,8 @@ fldrs_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
fldrd_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
if (wb != Post)
@@ -649,7 +649,7 @@ fldrd_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
fldrd_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
+ unsigned rm = INSTR (20, 16);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 64, scaling);
@@ -661,8 +661,8 @@ static void
fldrq_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
FRegister a;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
if (wb != Post)
@@ -682,7 +682,7 @@ fldrq_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
fldrq_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
+ unsigned rm = INSTR (20, 16);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
uint64_t displacement = OPT_SCALE (extended, 128, scaling);
@@ -717,8 +717,8 @@ fldrq_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
ldr32_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be. */
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u32
@@ -730,8 +730,8 @@ ldr32_abs (sim_cpu *cpu, uint32_t offset)
static void
ldr32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -756,9 +756,9 @@ ldr32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
ldr32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -773,8 +773,8 @@ ldr32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
ldr_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be. */
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u64
@@ -786,8 +786,8 @@ ldr_abs (sim_cpu *cpu, uint32_t offset)
static void
ldr_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -812,9 +812,9 @@ ldr_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
ldr_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -829,8 +829,8 @@ ldr_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
ldrb32_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be
there is no scaling required for a byte load. */
@@ -843,8 +843,8 @@ ldrb32_abs (sim_cpu *cpu, uint32_t offset)
static void
ldrb32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -869,9 +869,9 @@ ldrb32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
ldrb32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -888,8 +888,8 @@ ldrb32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
ldrsb_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -921,9 +921,9 @@ ldrsb_abs (sim_cpu *cpu, uint32_t offset)
static void
ldrsb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -938,8 +938,8 @@ ldrsb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
ldrh32_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be. */
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_u16
@@ -952,8 +952,8 @@ ldrh32_abs (sim_cpu *cpu, uint32_t offset)
static void
ldrh32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -978,9 +978,9 @@ ldrh32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
ldrh32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -995,8 +995,8 @@ ldrh32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
ldrsh32_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be. */
aarch64_set_reg_u64 (cpu, rt, NO_SP, (uint32_t) aarch64_get_mem_s16
@@ -1010,8 +1010,8 @@ ldrsh32_abs (sim_cpu *cpu, uint32_t offset)
static void
ldrsh32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -1037,9 +1037,9 @@ ldrsh32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
ldrsh32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -1055,8 +1055,8 @@ ldrsh32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
ldrsh_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be. */
aarch64_set_reg_u64 (cpu, rt, NO_SP, aarch64_get_mem_s16
@@ -1069,8 +1069,8 @@ ldrsh_abs (sim_cpu *cpu, uint32_t offset)
static void
ldrsh64_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -1095,9 +1095,9 @@ ldrsh64_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
ldrsh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -1112,8 +1112,8 @@ ldrsh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
ldrsw_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be. */
return aarch64_set_reg_s64 (cpu, rt, NO_SP, aarch64_get_mem_s32
@@ -1126,8 +1126,8 @@ ldrsw_abs (sim_cpu *cpu, uint32_t offset)
static void
ldrsw_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -1152,9 +1152,9 @@ ldrsw_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
ldrsw_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -1172,8 +1172,8 @@ ldrsw_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
str32_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be. */
aarch64_set_mem_u32 (cpu, (aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -1185,8 +1185,8 @@ str32_abs (sim_cpu *cpu, uint32_t offset)
static void
str32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -1210,9 +1210,9 @@ str32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
str32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP), extension);
@@ -1226,8 +1226,8 @@ str32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
str_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
aarch64_set_mem_u64 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -1239,8 +1239,8 @@ str_abs (sim_cpu *cpu, uint32_t offset)
static void
str_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -1265,9 +1265,9 @@ str_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
str_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -1283,8 +1283,8 @@ str_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
strb_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be.
There is no scaling required for a byte load. */
@@ -1297,8 +1297,8 @@ strb_abs (sim_cpu *cpu, uint32_t offset)
static void
strb_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -1323,9 +1323,9 @@ strb_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
strb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -1341,8 +1341,8 @@ strb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
strh_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* The target register may not be SP but the source may be. */
aarch64_set_mem_u16 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -1354,8 +1354,8 @@ strh_abs (sim_cpu *cpu, uint32_t offset)
static void
strh_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address;
if (rn == rt && wb != NoWriteBack)
@@ -1380,9 +1380,9 @@ strh_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
strh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR */
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -1404,7 +1404,7 @@ prfm_abs (sim_cpu *cpu, uint32_t offset)
10010 ==> PSTL2KEEP, 10001 ==> PSTL2STRM,
10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
ow ==> UNALLOC
- PrfOp prfop = prfop (aarch64_get_instr (cpu), 4, 0);
+ PrfOp prfop = prfop (instr, 4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK)
+ SCALE (offset, 64). */
@@ -1423,7 +1423,7 @@ prfm_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
ow ==> UNALLOC
rn may reference SP, rm may only reference ZR
- PrfOp prfop = prfop (aarch64_get_instr (cpu), 4, 0);
+ PrfOp prfop = prfop (instr, 4, 0);
uint64_t base = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
extension);
@@ -1444,7 +1444,7 @@ prfm_pcrel (sim_cpu *cpu, int32_t offset)
10010 ==> PSTL2KEEP, 10001 ==> PSTL2STRM,
10100 ==> PSTL3KEEP, 10101 ==> PSTL3STRM,
ow ==> UNALLOC
- PrfOp prfop = prfop (aarch64_get_instr (cpu), 4, 0);
+ PrfOp prfop = prfop (instr, 4, 0);
uint64_t address = aarch64_get_PC (cpu) + offset. */
/* TODO : implement this */
@@ -1455,12 +1455,12 @@ prfm_pcrel (sim_cpu *cpu, int32_t offset)
static void
ldxr (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int size = uimm (aarch64_get_instr (cpu), 31, 30);
- /* int ordered = uimm (aarch64_get_instr (cpu), 15, 15); */
- /* int exclusive = ! uimm (aarch64_get_instr (cpu), 23, 23); */
+ int size = INSTR (31, 30);
+ /* int ordered = INSTR (15, 15); */
+ /* int exclusive = ! INSTR (23, 23); */
switch (size)
{
@@ -1482,11 +1482,11 @@ ldxr (sim_cpu *cpu)
static void
stxr (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rs = uimm (aarch64_get_instr (cpu), 20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rt = INSTR (4, 0);
+ unsigned rs = INSTR (20, 16);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
- int size = uimm (aarch64_get_instr (cpu), 31, 30);
+ int size = INSTR (31, 30);
uint64_t data = aarch64_get_reg_u64 (cpu, rt, NO_SP);
switch (size)
@@ -1512,9 +1512,9 @@ dexLoadLiteral (sim_cpu *cpu)
instr[26] ==> V : 0 ==> GReg, 1 ==> FReg
instr[23, 5] == simm19 */
- /* unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); */
- uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 1)
- | uimm (aarch64_get_instr (cpu), 26, 26));
+ /* unsigned rt = INSTR (4, 0); */
+ uint32_t dispatch = ( (INSTR (31, 30) << 1)
+ | INSTR (26, 26));
int32_t imm = simm32 (aarch64_get_instr (cpu), 23, 5);
switch (dispatch)
@@ -1544,8 +1544,8 @@ dexLoadLiteral (sim_cpu *cpu)
static void
add32 (sim_cpu *cpu, uint32_t aimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, SP_OK) + aimm);
@@ -1555,8 +1555,8 @@ add32 (sim_cpu *cpu, uint32_t aimm)
static void
add64 (sim_cpu *cpu, uint32_t aimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + aimm);
@@ -1725,8 +1725,8 @@ set_flags_for_binop64 (sim_cpu *cpu, uint64_t result)
static void
adds32 (sim_cpu *cpu, uint32_t aimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* TODO : do we need to worry about signs here? */
int32_t value1 = aarch64_get_reg_s32 (cpu, rn, SP_OK);
@@ -1738,8 +1738,8 @@ adds32 (sim_cpu *cpu, uint32_t aimm)
static void
adds64 (sim_cpu *cpu, uint32_t aimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint64_t value2 = aimm;
@@ -1751,8 +1751,8 @@ adds64 (sim_cpu *cpu, uint32_t aimm)
static void
sub32 (sim_cpu *cpu, uint32_t aimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, SP_OK) - aimm);
@@ -1762,8 +1762,8 @@ sub32 (sim_cpu *cpu, uint32_t aimm)
static void
sub64 (sim_cpu *cpu, uint32_t aimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, SP_OK) - aimm);
@@ -1773,8 +1773,8 @@ sub64 (sim_cpu *cpu, uint32_t aimm)
static void
subs32 (sim_cpu *cpu, uint32_t aimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint32_t value2 = aimm;
@@ -1786,8 +1786,8 @@ subs32 (sim_cpu *cpu, uint32_t aimm)
static void
subs64 (sim_cpu *cpu, uint32_t aimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint32_t value2 = aimm;
@@ -1857,9 +1857,9 @@ shifted64 (uint64_t value, Shift shift, uint32_t count)
static void
add32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -1871,9 +1871,9 @@ add32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
add64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -1885,9 +1885,9 @@ add64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
adds32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -1901,9 +1901,9 @@ adds32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
adds64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
@@ -1917,9 +1917,9 @@ adds64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
sub32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -1931,9 +1931,9 @@ sub32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
sub64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -1945,9 +1945,9 @@ sub64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
subs32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -1961,9 +1961,9 @@ subs32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
subs64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
@@ -2025,9 +2025,9 @@ extreg64 (sim_cpu *cpu, unsigned int lo, Extension extension)
static void
add32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, SP_OK)
@@ -2039,9 +2039,9 @@ add32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
static void
add64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -2052,9 +2052,9 @@ add64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
static void
adds32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK);
uint32_t value2 = extreg32 (cpu, rm, extension) << shift;
@@ -2068,9 +2068,9 @@ adds32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
static void
adds64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint64_t value2 = extreg64 (cpu, rm, extension) << shift;
@@ -2083,9 +2083,9 @@ adds64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
static void
sub32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, SP_OK)
@@ -2097,9 +2097,9 @@ sub32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
static void
sub64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, SP_OK)
@@ -2110,9 +2110,9 @@ sub64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
static void
subs32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, SP_OK);
uint32_t value2 = extreg32 (cpu, rm, extension) << shift;
@@ -2126,9 +2126,9 @@ subs32_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
static void
subs64_ext (sim_cpu *cpu, Extension extension, uint32_t shift)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, SP_OK);
uint64_t value2 = extreg64 (cpu, rm, extension) << shift;
@@ -2150,9 +2150,9 @@ dexAddSubtractImmediate (sim_cpu *cpu)
instr[4,0] = Rd */
/* N.B. the shift is applied at decode before calling the add/sub routine. */
- uint32_t shift = uimm (aarch64_get_instr (cpu), 23, 22);
- uint32_t imm = uimm (aarch64_get_instr (cpu), 21, 10);
- uint32_t dispatch = uimm (aarch64_get_instr (cpu), 31, 29);
+ uint32_t shift = INSTR (23, 22);
+ uint32_t imm = INSTR (21, 10);
+ uint32_t dispatch = INSTR (31, 29);
NYI_assert (28, 24, 0x11);
@@ -2188,25 +2188,24 @@ dexAddSubtractShiftedRegister (sim_cpu *cpu)
instr[9,5] = Rn
instr[4,0] = Rd */
- uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31);
- /* 32 bit operations must have count[5] = 0
- or else we have an UNALLOC. */
- uint32_t count = uimm (aarch64_get_instr (cpu), 15, 10);
- /* Shift encoded as ROR is unallocated. */
- Shift shiftType = shift (aarch64_get_instr (cpu), 22);
- /* Dispatch on size:op i.e aarch64_get_instr (cpu)[31,29]. */
- uint32_t dispatch = uimm (aarch64_get_instr (cpu), 31, 29);
+ uint32_t size = INSTR (31, 31);
+ uint32_t count = INSTR (15, 10);
+ Shift shiftType = INSTR (23, 22);
NYI_assert (28, 24, 0x0B);
NYI_assert (21, 21, 0);
+ /* Shift encoded as ROR is unallocated. */
if (shiftType == ROR)
HALT_UNALLOC;
- if (!size && uimm (count, 5, 5))
+ /* 32 bit operations must have count[5] = 0
+ or else we have an UNALLOC. */
+ if (size == 0 && uimm (count, 5, 5))
HALT_UNALLOC;
- switch (dispatch)
+ /* Dispatch on size:op i.e instr [31,29]. */
+ switch (INSTR (31, 29))
{
case 0: add32_shift (cpu, shiftType, count); break;
case 1: adds32_shift (cpu, shiftType, count); break;
@@ -2237,10 +2236,8 @@ dexAddSubtractExtendedRegister (sim_cpu *cpu)
instr[9,5] = Rn
instr[4,0] = Rd */
- Extension extensionType = extension (aarch64_get_instr (cpu), 13);
- uint32_t shift = uimm (aarch64_get_instr (cpu), 12, 10);
- /* dispatch on size:op:set? i.e aarch64_get_instr (cpu)[31,29] */
- uint32_t dispatch = uimm (aarch64_get_instr (cpu), 31, 29);
+ Extension extensionType = INSTR (15, 13);
+ uint32_t shift = INSTR (12, 10);
NYI_assert (28, 24, 0x0B);
NYI_assert (21, 21, 1);
@@ -2249,7 +2246,8 @@ dexAddSubtractExtendedRegister (sim_cpu *cpu)
if (shift > 4)
HALT_UNALLOC;
- switch (dispatch)
+ /* Dispatch on size:op:set?. */
+ switch (INSTR (31, 29))
{
case 0: add32_ext (cpu, extensionType, shift); break;
case 1: adds32_ext (cpu, extensionType, shift); break;
@@ -2271,9 +2269,9 @@ dexAddSubtractExtendedRegister (sim_cpu *cpu)
static void
adc32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -2285,9 +2283,9 @@ adc32 (sim_cpu *cpu)
static void
adc64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -2299,9 +2297,9 @@ adc64 (sim_cpu *cpu)
static void
adcs32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t value2 = aarch64_get_reg_u32 (cpu, rm, NO_SP);
@@ -2315,9 +2313,9 @@ adcs32 (sim_cpu *cpu)
static void
adcs64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP);
@@ -2331,9 +2329,9 @@ adcs64 (sim_cpu *cpu)
static void
sbc32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5); /* ngc iff rn == 31. */
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5); /* ngc iff rn == 31. */
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -2345,9 +2343,9 @@ sbc32 (sim_cpu *cpu)
static void
sbc64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -2359,9 +2357,9 @@ sbc64 (sim_cpu *cpu)
static void
sbcs32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t value2 = aarch64_get_reg_u32 (cpu, rm, NO_SP);
@@ -2376,9 +2374,9 @@ sbcs32 (sim_cpu *cpu)
static void
sbcs64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP);
@@ -2401,16 +2399,15 @@ dexAddSubtractWithCarry (sim_cpu *cpu)
instr[9,5] = Rn
instr[4,0] = Rd */
- uint32_t op2 = uimm (aarch64_get_instr (cpu), 15, 10);
- /* Dispatch on size:op:set? i.e aarch64_get_instr (cpu)[31,29] */
- uint32_t dispatch = uimm (aarch64_get_instr (cpu), 31, 29);
+ uint32_t op2 = INSTR (15, 10);
NYI_assert (28, 21, 0xD0);
if (op2 != 0)
HALT_UNALLOC;
- switch (dispatch)
+ /* Dispatch on size:op:set?. */
+ switch (INSTR (31, 29))
{
case 0: adc32 (cpu); break;
case 1: adcs32 (cpu); break;
@@ -2480,19 +2477,19 @@ CondCompare (sim_cpu *cpu) /* aka: ccmp and ccmn */
NYI_assert (10, 10, 0);
NYI_assert (4, 4, 0);
- if (! testConditionCode (cpu, uimm (aarch64_get_instr (cpu), 15, 12)))
+ if (! testConditionCode (cpu, INSTR (15, 12)))
{
- aarch64_set_CPSR (cpu, uimm (aarch64_get_instr (cpu), 3, 0));
+ aarch64_set_CPSR (cpu, INSTR (3, 0));
return;
}
- negate = uimm (aarch64_get_instr (cpu), 30, 30) ? 1 : -1;
- rm = uimm (aarch64_get_instr (cpu), 20, 16);
- rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ negate = INSTR (30, 30) ? 1 : -1;
+ rm = INSTR (20, 16);
+ rn = INSTR ( 9, 5);
- if (uimm (aarch64_get_instr (cpu), 31, 31))
+ if (INSTR (31, 31))
{
- if (uimm (aarch64_get_instr (cpu), 11, 11))
+ if (INSTR (11, 11))
set_flags_for_sub64 (cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK),
negate * (uint64_t) rm);
else
@@ -2501,7 +2498,7 @@ CondCompare (sim_cpu *cpu) /* aka: ccmp and ccmn */
}
else
{
- if (uimm (aarch64_get_instr (cpu), 11, 11))
+ if (INSTR (11, 11))
set_flags_for_sub32 (cpu, aarch64_get_reg_u32 (cpu, rn, SP_OK),
negate * rm);
else
@@ -2523,16 +2520,16 @@ do_vec_MOV_whole_vector (sim_cpu *cpu)
instr[9,5] = Vs
instr[4,0] = Vd */
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
NYI_assert (29, 21, 0x075);
NYI_assert (15, 10, 0x07);
- if (uimm (aarch64_get_instr (cpu), 20, 16) != vs)
+ if (INSTR (20, 16) != vs)
HALT_NYI;
- if (uimm (aarch64_get_instr (cpu), 30, 30))
+ if (INSTR (30, 30))
aarch64_set_vec_u64 (cpu, vd, 1, aarch64_get_vec_u64 (cpu, vs, 1));
aarch64_set_vec_u64 (cpu, vd, 0, aarch64_get_vec_u64 (cpu, vs, 0));
@@ -2549,13 +2546,13 @@ do_vec_MOV_into_scalar (sim_cpu *cpu)
instr[9,5] = V source
instr[4,0] = R dest */
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vs = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
NYI_assert (29, 21, 0x070);
NYI_assert (17, 10, 0x0F);
- switch (uimm (aarch64_get_instr (cpu), 20, 18))
+ switch (INSTR (20, 18))
{
case 0x2:
aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_vec_u64 (cpu, vs, 0));
@@ -2570,7 +2567,7 @@ do_vec_MOV_into_scalar (sim_cpu *cpu)
case 0x5:
case 0x7:
aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_vec_u32
- (cpu, vs, uimm (aarch64_get_instr (cpu), 20, 19)));
+ (cpu, vs, INSTR (20, 19)));
break;
default:
@@ -2588,33 +2585,33 @@ do_vec_INS (sim_cpu *cpu)
instr[4,0] = V dest */
int index;
- unsigned rs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
NYI_assert (31, 21, 0x270);
NYI_assert (15, 10, 0x07);
- if (uimm (aarch64_get_instr (cpu), 16, 16))
+ if (INSTR (16, 16))
{
- index = uimm (aarch64_get_instr (cpu), 20, 17);
+ index = INSTR (20, 17);
aarch64_set_vec_u8 (cpu, vd, index,
aarch64_get_reg_u8 (cpu, rs, NO_SP));
}
- else if (uimm (aarch64_get_instr (cpu), 17, 17))
+ else if (INSTR (17, 17))
{
- index = uimm (aarch64_get_instr (cpu), 20, 18);
+ index = INSTR (20, 18);
aarch64_set_vec_u16 (cpu, vd, index,
aarch64_get_reg_u16 (cpu, rs, NO_SP));
}
- else if (uimm (aarch64_get_instr (cpu), 18, 18))
+ else if (INSTR (18, 18))
{
- index = uimm (aarch64_get_instr (cpu), 20, 19);
+ index = INSTR (20, 19);
aarch64_set_vec_u32 (cpu, vd, index,
aarch64_get_reg_u32 (cpu, rs, NO_SP));
}
- else if (uimm (aarch64_get_instr (cpu), 19, 19))
+ else if (INSTR (19, 19))
{
- index = uimm (aarch64_get_instr (cpu), 20, 20);
+ index = INSTR (20, 20);
aarch64_set_vec_u64 (cpu, vd, index,
aarch64_get_reg_u64 (cpu, rs, NO_SP));
}
@@ -2633,44 +2630,44 @@ do_vec_DUP_vector_into_vector (sim_cpu *cpu)
instr[9,5] = V source
instr[4,0] = V dest. */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
int i, index;
NYI_assert (29, 21, 0x070);
NYI_assert (15, 10, 0x01);
- if (uimm (aarch64_get_instr (cpu), 16, 16))
+ if (INSTR (16, 16))
{
- index = uimm (aarch64_get_instr (cpu), 20, 17);
+ index = INSTR (20, 17);
for (i = 0; i < (full ? 16 : 8); i++)
aarch64_set_vec_u8 (cpu, vd, i, aarch64_get_vec_u8 (cpu, vs, index));
}
- else if (uimm (aarch64_get_instr (cpu), 17, 17))
+ else if (INSTR (17, 17))
{
- index = uimm (aarch64_get_instr (cpu), 20, 18);
+ index = INSTR (20, 18);
for (i = 0; i < (full ? 8 : 4); i++)
aarch64_set_vec_u16 (cpu, vd, i, aarch64_get_vec_u16 (cpu, vs, index));
}
- else if (uimm (aarch64_get_instr (cpu), 18, 18))
+ else if (INSTR (18, 18))
{
- index = uimm (aarch64_get_instr (cpu), 20, 19);
+ index = INSTR (20, 19);
for (i = 0; i < (full ? 4 : 2); i++)
aarch64_set_vec_u32 (cpu, vd, i, aarch64_get_vec_u32 (cpu, vs, index));
}
else
{
- if (uimm (aarch64_get_instr (cpu), 19, 19) == 0)
+ if (INSTR (19, 19) == 0)
HALT_UNALLOC;
if (! full)
HALT_UNALLOC;
- index = uimm (aarch64_get_instr (cpu), 20, 20);
+ index = INSTR (20, 20);
for (i = 0; i < 2; i++)
aarch64_set_vec_u64 (cpu, vd, i, aarch64_get_vec_u64 (cpu, vs, index));
@@ -2690,11 +2687,11 @@ do_vec_TBL (sim_cpu *cpu)
instr[9,5] = V start
instr[4,0] = V dest */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- int len = uimm (aarch64_get_instr (cpu), 14, 13) + 1;
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int full = INSTR (30, 30);
+ int len = INSTR (14, 13) + 1;
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 21, 0x070);
@@ -2735,17 +2732,17 @@ do_vec_TRN (sim_cpu *cpu)
instr[9,5] = V source
instr[4,0] = V dest. */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- int second = uimm (aarch64_get_instr (cpu), 14, 14);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int full = INSTR (30, 30);
+ int second = INSTR (14, 14);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 24, 0x0E);
NYI_assert (13, 10, 0xA);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 8 : 4); i++)
@@ -2808,14 +2805,14 @@ do_vec_DUP_scalar_into_vector (sim_cpu *cpu)
instr[4,0] = V dest. */
unsigned i;
- unsigned Vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned Rs = uimm (aarch64_get_instr (cpu), 9, 5);
- int both = uimm (aarch64_get_instr (cpu), 30, 30);
+ unsigned Vd = INSTR (4, 0);
+ unsigned Rs = INSTR (9, 5);
+ int both = INSTR (30, 30);
NYI_assert (29, 20, 0x0E0);
NYI_assert (15, 10, 0x03);
- switch (uimm (aarch64_get_instr (cpu), 19, 16))
+ switch (INSTR (19, 16))
{
case 1:
for (i = 0; i < (both ? 16 : 8); i++)
@@ -2859,12 +2856,12 @@ do_vec_UZP (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd. */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- int upper = uimm (aarch64_get_instr (cpu), 14, 14);
+ int full = INSTR (30, 30);
+ int upper = INSTR (14, 14);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
uint64_t val_m1 = aarch64_get_vec_u64 (cpu, vm, 0);
uint64_t val_m2 = aarch64_get_vec_u64 (cpu, vm, 1);
@@ -2883,7 +2880,7 @@ do_vec_UZP (sim_cpu *cpu)
NYI_assert (15, 15, 0);
NYI_assert (13, 10, 6);
- switch (uimm (aarch64_get_instr (cpu), 23, 23))
+ switch (INSTR (23, 23))
{
case 0:
for (i = 0; i < 8; i++)
@@ -2931,12 +2928,12 @@ do_vec_ZIP (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd. */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- int upper = uimm (aarch64_get_instr (cpu), 14, 14);
+ int full = INSTR (30, 30);
+ int upper = INSTR (14, 14);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
uint64_t val_m1 = aarch64_get_vec_u64 (cpu, vm, 0);
uint64_t val_m2 = aarch64_get_vec_u64 (cpu, vm, 1);
@@ -2954,7 +2951,7 @@ do_vec_ZIP (sim_cpu *cpu)
NYI_assert (15, 15, 0);
NYI_assert (13, 10, 0xE);
- switch (uimm (aarch64_get_instr (cpu), 23, 23))
+ switch (INSTR (23, 23))
{
case 0:
val1 =
@@ -3106,22 +3103,22 @@ do_vec_MOV_immediate (sim_cpu *cpu)
instr[9,5] = low 5-bits of uimm8
instr[4,0] = Vd. */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned val = uimm (aarch64_get_instr (cpu), 18, 16) << 5
- | uimm (aarch64_get_instr (cpu), 9, 5);
+ int full = INSTR (30, 30);
+ unsigned vd = INSTR (4, 0);
+ unsigned val = INSTR (18, 16) << 5
+ | INSTR (9, 5);
unsigned i;
NYI_assert (29, 19, 0x1E0);
NYI_assert (11, 10, 1);
- switch (uimm (aarch64_get_instr (cpu), 15, 12))
+ switch (INSTR (15, 12))
{
case 0x0: /* 32-bit, no shift. */
case 0x2: /* 32-bit, shift by 8. */
case 0x4: /* 32-bit, shift by 16. */
case 0x6: /* 32-bit, shift by 24. */
- val <<= (8 * uimm (aarch64_get_instr (cpu), 14, 13));
+ val <<= (8 * INSTR (14, 13));
for (i = 0; i < (full ? 4 : 2); i++)
aarch64_set_vec_u32 (cpu, vd, i, val);
break;
@@ -3174,22 +3171,22 @@ do_vec_MVNI (sim_cpu *cpu)
instr[9,5] = low 5-bits of uimm8
instr[4,0] = Vd. */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned val = uimm (aarch64_get_instr (cpu), 18, 16) << 5
- | uimm (aarch64_get_instr (cpu), 9, 5);
+ int full = INSTR (30, 30);
+ unsigned vd = INSTR (4, 0);
+ unsigned val = INSTR (18, 16) << 5
+ | INSTR (9, 5);
unsigned i;
NYI_assert (29, 19, 0x5E0);
NYI_assert (11, 10, 1);
- switch (uimm (aarch64_get_instr (cpu), 15, 12))
+ switch (INSTR (15, 12))
{
case 0x0: /* 32-bit, no shift. */
case 0x2: /* 32-bit, shift by 8. */
case 0x4: /* 32-bit, shift by 16. */
case 0x6: /* 32-bit, shift by 24. */
- val <<= (8 * uimm (aarch64_get_instr (cpu), 14, 13));
+ val <<= (8 * INSTR (14, 13));
val = ~ val;
for (i = 0; i < (full ? 4 : 2); i++)
aarch64_set_vec_u32 (cpu, vd, i, val);
@@ -3256,15 +3253,15 @@ do_vec_ABS (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
unsigned i;
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 10, 0x82E);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -3305,16 +3302,16 @@ do_vec_ADDV (sim_cpu *cpu)
instr[9,5] = Vm
instr[4.0] = Rd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
unsigned i;
uint64_t val = 0;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 10, 0xC6E);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -3356,49 +3353,49 @@ do_vec_ins_2 (sim_cpu *cpu)
instr[4,0] = Vd. */
unsigned elem;
- unsigned vm = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
NYI_assert (31, 21, 0x270);
NYI_assert (17, 14, 0);
NYI_assert (12, 10, 7);
- if (uimm (aarch64_get_instr (cpu), 13, 13) == 1)
+ if (INSTR (13, 13) == 1)
{
- if (uimm (aarch64_get_instr (cpu), 18, 18) == 1)
+ if (INSTR (18, 18) == 1)
{
/* 32-bit moves. */
- elem = uimm (aarch64_get_instr (cpu), 20, 19);
+ elem = INSTR (20, 19);
aarch64_set_reg_u64 (cpu, vd, NO_SP,
aarch64_get_vec_u32 (cpu, vm, elem));
}
else
{
/* 64-bit moves. */
- if (uimm (aarch64_get_instr (cpu), 19, 19) != 1)
+ if (INSTR (19, 19) != 1)
HALT_NYI;
- elem = uimm (aarch64_get_instr (cpu), 20, 20);
+ elem = INSTR (20, 20);
aarch64_set_reg_u64 (cpu, vd, NO_SP,
aarch64_get_vec_u64 (cpu, vm, elem));
}
}
else
{
- if (uimm (aarch64_get_instr (cpu), 18, 18) == 1)
+ if (INSTR (18, 18) == 1)
{
/* 32-bit moves. */
- elem = uimm (aarch64_get_instr (cpu), 20, 19);
+ elem = INSTR (20, 19);
aarch64_set_vec_u32 (cpu, vd, elem,
aarch64_get_reg_u32 (cpu, vm, NO_SP));
}
else
{
/* 64-bit moves. */
- if (uimm (aarch64_get_instr (cpu), 19, 19) != 1)
+ if (INSTR (19, 19) != 1)
HALT_NYI;
- elem = uimm (aarch64_get_instr (cpu), 20, 20);
+ elem = INSTR (20, 20);
aarch64_set_vec_u64 (cpu, vd, elem,
aarch64_get_reg_u64 (cpu, vm, NO_SP));
}
@@ -3419,17 +3416,17 @@ do_vec_mull (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- int unsign = uimm (aarch64_get_instr (cpu), 29, 29);
- int bias = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int unsign = INSTR (29, 29);
+ int bias = INSTR (30, 30);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR ( 9, 5);
+ unsigned vd = INSTR ( 4, 0);
unsigned i;
NYI_assert (28, 24, 0x0E);
NYI_assert (15, 10, 0x30);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
if (bias)
@@ -3497,19 +3494,19 @@ do_vec_fadd (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x35);
- if (uimm (aarch64_get_instr (cpu), 23, 23))
+ if (INSTR (23, 23))
{
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_NYI;
@@ -3529,7 +3526,7 @@ do_vec_fadd (sim_cpu *cpu)
}
else
{
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_NYI;
@@ -3562,17 +3559,17 @@ do_vec_add (sim_cpu *cpu)
instr[9,5] = Vm
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x21);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -3617,17 +3614,17 @@ do_vec_mul (sim_cpu *cpu)
instr[9,5] = Vm
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x27);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -3677,17 +3674,17 @@ do_vec_MLA (sim_cpu *cpu)
instr[9,5] = Vm
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x25);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -3786,29 +3783,29 @@ dminnm (double a, double b)
static void
do_vec_FminmaxNMP (sim_cpu *cpu)
{
- /* aarch64_get_instr (cpu)[31] = 0
- aarch64_get_instr (cpu)[30] = half (0)/full (1)
- aarch64_get_instr (cpu)[29,24] = 10 1110
- aarch64_get_instr (cpu)[23] = max(0)/min(1)
- aarch64_get_instr (cpu)[22] = float (0)/double (1)
- aarch64_get_instr (cpu)[21] = 1
- aarch64_get_instr (cpu)[20,16] = Vn
- aarch64_get_instr (cpu)[15,10] = 1100 01
- aarch64_get_instr (cpu)[9,5] = Vm
- aarch64_get_instr (cpu)[4.0] = Vd. */
-
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ /* instr [31] = 0
+ instr [30] = half (0)/full (1)
+ instr [29,24] = 10 1110
+ instr [23] = max(0)/min(1)
+ instr [22] = float (0)/double (1)
+ instr [21] = 1
+ instr [20,16] = Vn
+ instr [15,10] = 1100 01
+ instr [9,5] = Vm
+ instr [4.0] = Vd. */
+
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ int full = INSTR (30, 30);
NYI_assert (29, 24, 0x2E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x31);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
- double (* fn)(double, double) = uimm (aarch64_get_instr (cpu), 23, 23)
+ double (* fn)(double, double) = INSTR (23, 23)
? dminnm : dmaxnm;
if (! full)
@@ -3822,7 +3819,7 @@ do_vec_FminmaxNMP (sim_cpu *cpu)
}
else
{
- float (* fn)(float, float) = uimm (aarch64_get_instr (cpu), 23, 23)
+ float (* fn)(float, float) = INSTR (23, 23)
? fminnm : fmaxnm;
aarch64_set_vec_float (cpu, vd, 0,
@@ -3854,11 +3851,11 @@ do_vec_AND (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 21, 0x071);
NYI_assert (15, 10, 0x07);
@@ -3880,11 +3877,11 @@ do_vec_BSL (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 21, 0x173);
NYI_assert (15, 10, 0x07);
@@ -3908,11 +3905,11 @@ do_vec_EOR (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 21, 0x171);
NYI_assert (15, 10, 0x07);
@@ -3936,11 +3933,11 @@ do_vec_bit (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned test_false = uimm (aarch64_get_instr (cpu), 22, 22);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned test_false = INSTR (22, 22);
unsigned i;
NYI_assert (29, 23, 0x5D);
@@ -3972,11 +3969,11 @@ do_vec_ORN (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 21, 0x077);
NYI_assert (15, 10, 0x07);
@@ -3998,11 +3995,11 @@ do_vec_ORR (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 21, 0x075);
NYI_assert (15, 10, 0x07);
@@ -4024,11 +4021,11 @@ do_vec_BIC (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 21, 0x073);
NYI_assert (15, 10, 0x07);
@@ -4050,15 +4047,15 @@ do_vec_XTN (sim_cpu *cpu)
instr[9,5] = Vs
instr[4,0] = Vd. */
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned bias = uimm (aarch64_get_instr (cpu), 30, 30);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned bias = INSTR (30, 30);
unsigned i;
NYI_assert (29, 24, 0x0E);
NYI_assert (21, 10, 0x84A);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
if (bias)
@@ -4107,9 +4104,9 @@ do_vec_maxv (sim_cpu *cpu)
instr[9,5] = V source
instr[4.0] = R dest. */
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
+ unsigned vs = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
unsigned i;
NYI_assert (28, 24, 0x0E);
@@ -4117,13 +4114,13 @@ do_vec_maxv (sim_cpu *cpu)
NYI_assert (20, 17, 8);
NYI_assert (15, 10, 0x2A);
- switch ((uimm (aarch64_get_instr (cpu), 29, 29) << 1)
- | uimm (aarch64_get_instr (cpu), 16, 16))
+ switch ((INSTR (29, 29) << 1)
+ | INSTR (16, 16))
{
case 0: /* SMAXV. */
{
int64_t smax;
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
smax = aarch64_get_vec_s8 (cpu, vs, 0);
@@ -4150,7 +4147,7 @@ do_vec_maxv (sim_cpu *cpu)
case 1: /* SMINV. */
{
int64_t smin;
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
smin = aarch64_get_vec_s8 (cpu, vs, 0);
@@ -4178,7 +4175,7 @@ do_vec_maxv (sim_cpu *cpu)
case 2: /* UMAXV. */
{
uint64_t umax;
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
umax = aarch64_get_vec_u8 (cpu, vs, 0);
@@ -4206,7 +4203,7 @@ do_vec_maxv (sim_cpu *cpu)
case 3: /* UMINV. */
{
uint64_t umin;
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
umin = aarch64_get_vec_u8 (cpu, vs, 0);
@@ -4244,8 +4241,8 @@ do_vec_fminmaxV (sim_cpu *cpu)
instr[9,5] = V source
instr[4.0] = R dest. */
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vs = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
unsigned i;
float res = aarch64_get_vec_float (cpu, vs, 0);
@@ -4253,9 +4250,9 @@ do_vec_fminmaxV (sim_cpu *cpu)
NYI_assert (22, 14, 0x0C3);
NYI_assert (11, 10, 2);
- if (uimm (aarch64_get_instr (cpu), 23, 23))
+ if (INSTR (23, 23))
{
- switch (uimm (aarch64_get_instr (cpu), 13, 12))
+ switch (INSTR (13, 12))
{
case 0: /* FMNINNMV. */
for (i = 1; i < 4; i++)
@@ -4273,7 +4270,7 @@ do_vec_fminmaxV (sim_cpu *cpu)
}
else
{
- switch (uimm (aarch64_get_instr (cpu), 13, 12))
+ switch (INSTR (13, 12))
{
case 0: /* FMNAXNMV. */
for (i = 1; i < 4; i++)
@@ -4309,11 +4306,11 @@ do_vec_Fminmax (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned min = uimm (aarch64_get_instr (cpu), 23, 23);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned min = INSTR (23, 23);
unsigned i;
NYI_assert (29, 24, 0x0E);
@@ -4321,16 +4318,16 @@ do_vec_Fminmax (sim_cpu *cpu)
NYI_assert (15, 14, 3);
NYI_assert (11, 10, 1);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
double (* func)(double, double);
if (! full)
HALT_NYI;
- if (uimm (aarch64_get_instr (cpu), 13, 12) == 0)
+ if (INSTR (13, 12) == 0)
func = min ? dminnm : dmaxnm;
- else if (uimm (aarch64_get_instr (cpu), 13, 12) == 3)
+ else if (INSTR (13, 12) == 3)
func = min ? fmin : fmax;
else
HALT_NYI;
@@ -4344,9 +4341,9 @@ do_vec_Fminmax (sim_cpu *cpu)
{
float (* func)(float, float);
- if (uimm (aarch64_get_instr (cpu), 13, 12) == 0)
+ if (INSTR (13, 12) == 0)
func = min ? fminnm : fmaxnm;
- else if (uimm (aarch64_get_instr (cpu), 13, 12) == 3)
+ else if (INSTR (13, 12) == 3)
func = min ? fminf : fmaxf;
else
HALT_NYI;
@@ -4369,10 +4366,10 @@ do_vec_SCVTF (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd. */
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned size = uimm (aarch64_get_instr (cpu), 22, 22);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned size = INSTR (22, 22);
unsigned i;
NYI_assert (29, 23, 0x1C);
@@ -4482,7 +4479,7 @@ do_vec_SCVTF (sim_cpu *cpu)
{ \
if (vm != 0) \
HALT_NYI; \
- if (uimm (aarch64_get_instr (cpu), 22, 22)) \
+ if (INSTR (22, 22)) \
{ \
if (! full) \
HALT_NYI; \
@@ -4505,7 +4502,7 @@ do_vec_SCVTF (sim_cpu *cpu)
#define VEC_FCMP(CMP) \
do \
{ \
- if (uimm (aarch64_get_instr (cpu), 22, 22)) \
+ if (INSTR (22, 22)) \
{ \
if (! full) \
HALT_NYI; \
@@ -4544,31 +4541,31 @@ do_vec_compare (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- int size = uimm (aarch64_get_instr (cpu), 23, 22);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int full = INSTR (30, 30);
+ int size = INSTR (23, 22);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (28, 24, 0x0E);
NYI_assert (21, 21, 1);
- if ((uimm (aarch64_get_instr (cpu), 11, 11)
- && uimm (aarch64_get_instr (cpu), 14, 14))
- || ((uimm (aarch64_get_instr (cpu), 11, 11) == 0
- && uimm (aarch64_get_instr (cpu), 10, 10) == 0)))
+ if ((INSTR (11, 11)
+ && INSTR (14, 14))
+ || ((INSTR (11, 11) == 0
+ && INSTR (10, 10) == 0)))
{
/* A compare vs 0. */
if (vm != 0)
{
- if (uimm (aarch64_get_instr (cpu), 15, 10) == 0x2A)
+ if (INSTR (15, 10) == 0x2A)
do_vec_maxv (cpu);
- else if (uimm (aarch64_get_instr (cpu), 15, 10) == 0x32
- || uimm (aarch64_get_instr (cpu), 15, 10) == 0x3E)
+ else if (INSTR (15, 10) == 0x32
+ || INSTR (15, 10) == 0x3E)
do_vec_fminmaxV (cpu);
- else if (uimm (aarch64_get_instr (cpu), 29, 23) == 0x1C
- && uimm (aarch64_get_instr (cpu), 21, 10) == 0x876)
+ else if (INSTR (29, 23) == 0x1C
+ && INSTR (21, 10) == 0x876)
do_vec_SCVTF (cpu);
else
HALT_NYI;
@@ -4576,12 +4573,12 @@ do_vec_compare (sim_cpu *cpu)
}
}
- if (uimm (aarch64_get_instr (cpu), 14, 14))
+ if (INSTR (14, 14))
{
/* A floating point compare. */
- unsigned decode = (uimm (aarch64_get_instr (cpu), 29, 29) << 5)
- | (uimm (aarch64_get_instr (cpu), 23, 23) << 4)
- | uimm (aarch64_get_instr (cpu), 13, 10);
+ unsigned decode = (INSTR (29, 29) << 5)
+ | (INSTR (23, 23) << 4)
+ | INSTR (13, 10);
NYI_assert (15, 15, 1);
@@ -4602,8 +4599,8 @@ do_vec_compare (sim_cpu *cpu)
}
else
{
- unsigned decode = (uimm (aarch64_get_instr (cpu), 29, 29) << 6)
- | uimm (aarch64_get_instr (cpu), 15, 10);
+ unsigned decode = (INSTR (29, 29) << 6)
+ | INSTR (15, 10);
switch (decode)
{
@@ -4638,10 +4635,10 @@ do_vec_SSHL (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd. */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
signed int shift;
@@ -4651,7 +4648,7 @@ do_vec_SSHL (sim_cpu *cpu)
/* FIXME: What is a signed shift left in this context ?. */
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -4722,17 +4719,17 @@ do_vec_USHL (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
signed int shift;
NYI_assert (29, 24, 0x2E);
NYI_assert (15, 10, 0x11);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -4803,17 +4800,17 @@ do_vec_FMLA (sim_cpu *cpu)
instr[9,5] = Vm
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 23, 0x1C);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x33);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_UNALLOC;
@@ -4847,19 +4844,19 @@ do_vec_max (sim_cpu *cpu)
instr[9,5] = Vm
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (28, 24, 0x0E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x19);
- if (uimm (aarch64_get_instr (cpu), 29, 29))
+ if (INSTR (29, 29))
{
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -4894,7 +4891,7 @@ do_vec_max (sim_cpu *cpu)
}
else
{
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -4943,19 +4940,19 @@ do_vec_min (sim_cpu *cpu)
instr[9,5] = Vm
instr[4.0] = Vd. */
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (28, 24, 0x0E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x1B);
- if (uimm (aarch64_get_instr (cpu), 29, 29))
+ if (INSTR (29, 29))
{
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -4990,7 +4987,7 @@ do_vec_min (sim_cpu *cpu)
}
else
{
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -5039,10 +5036,10 @@ do_vec_sub_long (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = V dest. */
- unsigned size = uimm (aarch64_get_instr (cpu), 23, 22);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned size = INSTR (23, 22);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned bias = 0;
unsigned i;
@@ -5053,7 +5050,7 @@ do_vec_sub_long (sim_cpu *cpu)
if (size == 3)
HALT_UNALLOC;
- switch (uimm (aarch64_get_instr (cpu), 30, 29))
+ switch (INSTR (30, 29))
{
case 2: /* SSUBL2. */
bias = 2;
@@ -5138,11 +5135,11 @@ do_vec_ADDP (sim_cpu *cpu)
FRegister copy_vn;
FRegister copy_vm;
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned size = uimm (aarch64_get_instr (cpu), 23, 22);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned size = INSTR (23, 22);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i, range;
NYI_assert (29, 24, 0x0E);
@@ -5208,38 +5205,38 @@ do_vec_UMOV (sim_cpu *cpu)
instr[9,5] = V source
instr[4,0] = R dest. */
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vs = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
unsigned index;
NYI_assert (29, 21, 0x070);
NYI_assert (15, 10, 0x0F);
- if (uimm (aarch64_get_instr (cpu), 16, 16))
+ if (INSTR (16, 16))
{
/* Byte transfer. */
- index = uimm (aarch64_get_instr (cpu), 20, 17);
+ index = INSTR (20, 17);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_vec_u8 (cpu, vs, index));
}
- else if (uimm (aarch64_get_instr (cpu), 17, 17))
+ else if (INSTR (17, 17))
{
- index = uimm (aarch64_get_instr (cpu), 20, 18);
+ index = INSTR (20, 18);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_vec_u16 (cpu, vs, index));
}
- else if (uimm (aarch64_get_instr (cpu), 18, 18))
+ else if (INSTR (18, 18))
{
- index = uimm (aarch64_get_instr (cpu), 20, 19);
+ index = INSTR (20, 19);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_vec_u32 (cpu, vs, index));
}
else
{
- if (uimm (aarch64_get_instr (cpu), 30, 30) != 1)
+ if (INSTR (30, 30) != 1)
HALT_UNALLOC;
- index = uimm (aarch64_get_instr (cpu), 20, 20);
+ index = INSTR (20, 20);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_vec_u64 (cpu, vs, index));
}
@@ -5257,15 +5254,15 @@ do_vec_FABS (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd. */
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
unsigned i;
NYI_assert (29, 23, 0x1D);
NYI_assert (21, 10, 0x83E);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_NYI;
@@ -5293,16 +5290,16 @@ do_vec_FCVTZS (sim_cpu *cpu)
instr[9,5] = Rn
instr[4,0] = Rd. */
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
unsigned i;
NYI_assert (31, 31, 0);
NYI_assert (29, 23, 0x1D);
NYI_assert (21, 10, 0x86E);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_UNALLOC;
@@ -5330,16 +5327,16 @@ do_vec_op1 (sim_cpu *cpu)
instr[4,0] = Vd */
NYI_assert (29, 24, 0x0E);
- if (uimm (aarch64_get_instr (cpu), 21, 21) == 0)
+ if (INSTR (21, 21) == 0)
{
- if (uimm (aarch64_get_instr (cpu), 23, 22) == 0)
+ if (INSTR (23, 22) == 0)
{
- if (uimm (aarch64_get_instr (cpu), 30, 30) == 1
- && uimm (aarch64_get_instr (cpu), 17, 14) == 0
- && uimm (aarch64_get_instr (cpu), 12, 10) == 7)
+ if (INSTR (30, 30) == 1
+ && INSTR (17, 14) == 0
+ && INSTR (12, 10) == 7)
return do_vec_ins_2 (cpu);
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x01: do_vec_DUP_vector_into_vector (cpu); return;
case 0x03: do_vec_DUP_scalar_into_vector (cpu); return;
@@ -5347,7 +5344,7 @@ do_vec_op1 (sim_cpu *cpu)
case 0x0A: do_vec_TRN (cpu); return;
case 0x0F:
- if (uimm (aarch64_get_instr (cpu), 17, 16) == 0)
+ if (INSTR (17, 16) == 0)
{
do_vec_MOV_into_scalar (cpu);
return;
@@ -5373,7 +5370,7 @@ do_vec_op1 (sim_cpu *cpu)
}
}
- switch (uimm (aarch64_get_instr (cpu), 13, 10))
+ switch (INSTR (13, 10))
{
case 0x6: do_vec_UZP (cpu); return;
case 0xE: do_vec_ZIP (cpu); return;
@@ -5383,10 +5380,10 @@ do_vec_op1 (sim_cpu *cpu)
}
}
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x07:
- switch (uimm (aarch64_get_instr (cpu), 23, 21))
+ switch (INSTR (23, 21))
{
case 1: do_vec_AND (cpu); return;
case 3: do_vec_BIC (cpu); return;
@@ -5409,7 +5406,7 @@ do_vec_op1 (sim_cpu *cpu)
case 0x35: do_vec_fadd (cpu); return;
case 0x2E:
- switch (uimm (aarch64_get_instr (cpu), 20, 16))
+ switch (INSTR (20, 16))
{
case 0x00: do_vec_ABS (cpu); return;
case 0x01: do_vec_FCVTZS (cpu); return;
@@ -5452,29 +5449,29 @@ do_vec_xtl (sim_cpu *cpu)
instr[9,5] = V source
instr[4,0] = V dest. */
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i, shift, bias = 0;
NYI_assert (28, 22, 0x3C);
NYI_assert (15, 10, 0x29);
- switch (uimm (aarch64_get_instr (cpu), 30, 29))
+ switch (INSTR (30, 29))
{
case 2: /* SXTL2, SSHLL2. */
bias = 2;
case 0: /* SXTL, SSHLL. */
- if (uimm (aarch64_get_instr (cpu), 21, 21))
+ if (INSTR (21, 21))
{
- shift = uimm (aarch64_get_instr (cpu), 20, 16);
+ shift = INSTR (20, 16);
aarch64_set_vec_s64
(cpu, vd, 0, aarch64_get_vec_s32 (cpu, vs, bias) << shift);
aarch64_set_vec_s64
(cpu, vd, 1, aarch64_get_vec_s32 (cpu, vs, bias + 1) << shift);
}
- else if (uimm (aarch64_get_instr (cpu), 20, 20))
+ else if (INSTR (20, 20))
{
- shift = uimm (aarch64_get_instr (cpu), 19, 16);
+ shift = INSTR (19, 16);
bias *= 2;
for (i = 0; i < 4; i++)
aarch64_set_vec_s32
@@ -5484,7 +5481,7 @@ do_vec_xtl (sim_cpu *cpu)
{
NYI_assert (19, 19, 1);
- shift = uimm (aarch64_get_instr (cpu), 18, 16);
+ shift = INSTR (18, 16);
bias *= 3;
for (i = 0; i < 8; i++)
aarch64_set_vec_s16
@@ -5495,17 +5492,17 @@ do_vec_xtl (sim_cpu *cpu)
case 3: /* UXTL2, USHLL2. */
bias = 2;
case 1: /* UXTL, USHLL. */
- if (uimm (aarch64_get_instr (cpu), 21, 21))
+ if (INSTR (21, 21))
{
- shift = uimm (aarch64_get_instr (cpu), 20, 16);
+ shift = INSTR (20, 16);
aarch64_set_vec_u64
(cpu, vd, 0, aarch64_get_vec_u32 (cpu, vs, bias) << shift);
aarch64_set_vec_u64
(cpu, vd, 1, aarch64_get_vec_u32 (cpu, vs, bias + 1) << shift);
}
- else if (uimm (aarch64_get_instr (cpu), 20, 20))
+ else if (INSTR (20, 20))
{
- shift = uimm (aarch64_get_instr (cpu), 19, 16);
+ shift = INSTR (19, 16);
bias *= 2;
for (i = 0; i < 4; i++)
aarch64_set_vec_u32
@@ -5515,7 +5512,7 @@ do_vec_xtl (sim_cpu *cpu)
{
NYI_assert (19, 19, 1);
- shift = uimm (aarch64_get_instr (cpu), 18, 16);
+ shift = INSTR (18, 16);
bias *= 3;
for (i = 0; i < 8; i++)
aarch64_set_vec_u16
@@ -5537,17 +5534,17 @@ do_vec_SHL (sim_cpu *cpu)
instr [4, 0] = Vd. */
int shift;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int full = INSTR (30, 30);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 23, 0x1E);
NYI_assert (15, 10, 0x15);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
- shift = uimm (aarch64_get_instr (cpu), 21, 16);
+ shift = INSTR (21, 16);
if (full == 0)
HALT_UNALLOC;
@@ -5561,9 +5558,9 @@ do_vec_SHL (sim_cpu *cpu)
return;
}
- if (uimm (aarch64_get_instr (cpu), 21, 21))
+ if (INSTR (21, 21))
{
- shift = uimm (aarch64_get_instr (cpu), 20, 16);
+ shift = INSTR (20, 16);
for (i = 0; i < (full ? 4 : 2); i++)
{
@@ -5574,9 +5571,9 @@ do_vec_SHL (sim_cpu *cpu)
return;
}
- if (uimm (aarch64_get_instr (cpu), 20, 20))
+ if (INSTR (20, 20))
{
- shift = uimm (aarch64_get_instr (cpu), 19, 16);
+ shift = INSTR (19, 16);
for (i = 0; i < (full ? 8 : 4); i++)
{
@@ -5587,10 +5584,10 @@ do_vec_SHL (sim_cpu *cpu)
return;
}
- if (uimm (aarch64_get_instr (cpu), 19, 19) == 0)
+ if (INSTR (19, 19) == 0)
HALT_UNALLOC;
- shift = uimm (aarch64_get_instr (cpu), 18, 16);
+ shift = INSTR (18, 16);
for (i = 0; i < (full ? 16 : 8); i++)
{
@@ -5621,7 +5618,7 @@ do_vec_SSHR_USHR (sim_cpu *cpu)
NYI_assert (28, 23, 0x1E);
NYI_assert (15, 10, 0x01);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
shift = 128 - shift;
@@ -5644,7 +5641,7 @@ do_vec_SSHR_USHR (sim_cpu *cpu)
return;
}
- if (uimm (aarch64_get_instr (cpu), 21, 21))
+ if (INSTR (21, 21))
{
shift = 64 - shift;
@@ -5664,7 +5661,7 @@ do_vec_SSHR_USHR (sim_cpu *cpu)
return;
}
- if (uimm (aarch64_get_instr (cpu), 20, 20))
+ if (INSTR (20, 20))
{
shift = 32 - shift;
@@ -5684,7 +5681,7 @@ do_vec_SSHR_USHR (sim_cpu *cpu)
return;
}
- if (uimm (aarch64_get_instr (cpu), 19, 19) == 0)
+ if (INSTR (19, 19) == 0)
HALT_UNALLOC;
shift = 16 - shift;
@@ -5719,12 +5716,12 @@ do_vec_MUL_by_element (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned L = uimm (aarch64_get_instr (cpu), 21, 21);
- unsigned H = uimm (aarch64_get_instr (cpu), 11, 11);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned size = uimm (aarch64_get_instr (cpu), 23, 22);
+ unsigned full = INSTR (30, 30);
+ unsigned L = INSTR (21, 21);
+ unsigned H = INSTR (11, 11);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned size = INSTR (23, 22);
unsigned index;
unsigned vm;
unsigned e;
@@ -5742,8 +5739,8 @@ do_vec_MUL_by_element (sim_cpu *cpu)
uint16_t element1;
uint16_t element2;
- index = (H << 2) | (L << 1) | uimm (aarch64_get_instr (cpu), 20, 20);
- vm = uimm (aarch64_get_instr (cpu), 19, 16);
+ index = (H << 2) | (L << 1) | INSTR (20, 20);
+ vm = INSTR (19, 16);
element2 = aarch64_get_vec_u16 (cpu, vm, index);
for (e = 0; e < (full ? 8 : 4); e ++)
@@ -5763,7 +5760,7 @@ do_vec_MUL_by_element (sim_cpu *cpu)
uint32_t element2;
index = (H << 1) | L;
- vm = uimm (aarch64_get_instr (cpu), 20, 16);
+ vm = INSTR (20, 16);
element2 = aarch64_get_vec_u32 (cpu, vm, index);
for (e = 0; e < (full ? 4 : 2); e ++)
@@ -5794,9 +5791,9 @@ do_vec_op2 (sim_cpu *cpu)
NYI_assert (29, 24, 0x0F);
- if (uimm (aarch64_get_instr (cpu), 23, 23) != 0)
+ if (INSTR (23, 23) != 0)
{
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x20:
case 0x22: do_vec_MUL_by_element (cpu); return;
@@ -5805,7 +5802,7 @@ do_vec_op2 (sim_cpu *cpu)
}
else
{
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x01: do_vec_SSHR_USHR (cpu); return;
case 0x15: do_vec_SHL (cpu); return;
@@ -5828,15 +5825,15 @@ do_vec_neg (sim_cpu *cpu)
instr[9,5] = Vs
instr[4,0] = Vd */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int full = INSTR (30, 30);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 24, 0x2E);
NYI_assert (21, 10, 0x82E);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -5873,15 +5870,15 @@ do_vec_sqrt (sim_cpu *cpu)
instr[9,5] = Vs
instr[4,0] = Vd. */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int full = INSTR (30, 30);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 23, 0x5B);
NYI_assert (21, 10, 0x87E);
- if (uimm (aarch64_get_instr (cpu), 22, 22) == 0)
+ if (INSTR (22, 22) == 0)
for (i = 0; i < (full ? 4 : 2); i++)
aarch64_set_vec_float (cpu, vd, i,
sqrtf (aarch64_get_vec_float (cpu, vs, i)));
@@ -5907,16 +5904,16 @@ do_vec_mls_indexed (sim_cpu *cpu)
instr[9,5] = Vs
instr[4,0] = Vd. */
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
+ int full = INSTR (30, 30);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned vm = INSTR (20, 16);
unsigned i;
NYI_assert (15, 12, 4);
NYI_assert (10, 10, 0);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 1:
{
@@ -5926,8 +5923,8 @@ do_vec_mls_indexed (sim_cpu *cpu)
if (vm > 15)
HALT_NYI;
- elem = (uimm (aarch64_get_instr (cpu), 21, 20) << 1)
- | uimm (aarch64_get_instr (cpu), 11, 11);
+ elem = (INSTR (21, 20) << 1)
+ | INSTR (11, 11);
val = aarch64_get_vec_u16 (cpu, vm, elem);
for (i = 0; i < (full ? 8 : 4); i++)
@@ -5939,8 +5936,8 @@ do_vec_mls_indexed (sim_cpu *cpu)
case 2:
{
- unsigned elem = (uimm (aarch64_get_instr (cpu), 21, 21) << 1)
- | uimm (aarch64_get_instr (cpu), 11, 11);
+ unsigned elem = (INSTR (21, 21) << 1)
+ | INSTR (11, 11);
uint64_t val = aarch64_get_vec_u32 (cpu, vm, elem);
for (i = 0; i < (full ? 4 : 2); i++)
@@ -5970,17 +5967,17 @@ do_vec_SUB (sim_cpu *cpu)
instr [9, 5] = Vn
instr [4, 0] = Vd. */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 24, 0x2E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x21);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -6028,17 +6025,17 @@ do_vec_MLS (sim_cpu *cpu)
instr [9, 5] = Vn
instr [4, 0] = Vd. */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 24, 0x2E);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x25);
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0:
for (i = 0; i < (full ? 16 : 8); i++)
@@ -6082,17 +6079,17 @@ do_vec_FDIV (sim_cpu *cpu)
instr [9, 5] = Vn
instr [4, 0] = Vd. */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 23, 0x5C);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x3F);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_UNALLOC;
@@ -6122,17 +6119,17 @@ do_vec_FMUL (sim_cpu *cpu)
instr [9, 5] = Vn
instr [4, 0] = Vd. */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
NYI_assert (29, 23, 0x5C);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x37);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_UNALLOC;
@@ -6162,16 +6159,16 @@ do_vec_FADDP (sim_cpu *cpu)
instr [9, 5] = Vn
instr [4, 0] = Vd. */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned full = INSTR (30, 30);
+ unsigned vm = INSTR (20, 16);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
NYI_assert (29, 23, 0x5C);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x35);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
/* Extract values before adding them incase vd == vn/vm. */
double tmp1 = aarch64_get_vec_double (cpu, vn, 0);
@@ -6224,15 +6221,15 @@ do_vec_FSQRT (sim_cpu *cpu)
instr[9,5] = Vsrc
instr[4,0] = Vdest. */
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
int i;
NYI_assert (29, 23, 0x5D);
NYI_assert (21, 10, 0x87E);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_UNALLOC;
@@ -6260,15 +6257,15 @@ do_vec_FNEG (sim_cpu *cpu)
instr[9,5] = Vsrc
instr[4,0] = Vdest. */
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
+ unsigned full = INSTR (30, 30);
int i;
NYI_assert (29, 23, 0x5D);
NYI_assert (21, 10, 0x83E);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
if (! full)
HALT_UNALLOC;
@@ -6294,10 +6291,10 @@ do_vec_NOT (sim_cpu *cpu)
instr[9,5] = Vn
instr[4.0] = Vd. */
- unsigned vn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vn = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned i;
- int full = uimm (aarch64_get_instr (cpu), 30, 30);
+ int full = INSTR (30, 30);
NYI_assert (29, 10, 0xB8816);
@@ -6378,8 +6375,8 @@ do_vec_MOV_element (sim_cpu *cpu)
instr[9,5] = Vs
instr[4.0] = Vd. */
- unsigned vs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned vs = INSTR (9, 5);
+ unsigned vd = INSTR (4, 0);
unsigned src_index;
unsigned dst_index;
@@ -6387,29 +6384,29 @@ do_vec_MOV_element (sim_cpu *cpu)
NYI_assert (15, 15, 0);
NYI_assert (10, 10, 1);
- if (uimm (aarch64_get_instr (cpu), 16, 16))
+ if (INSTR (16, 16))
{
/* Move a byte. */
- src_index = uimm (aarch64_get_instr (cpu), 14, 11);
- dst_index = uimm (aarch64_get_instr (cpu), 20, 17);
+ src_index = INSTR (14, 11);
+ dst_index = INSTR (20, 17);
aarch64_set_vec_u8 (cpu, vd, dst_index,
aarch64_get_vec_u8 (cpu, vs, src_index));
}
- else if (uimm (aarch64_get_instr (cpu), 17, 17))
+ else if (INSTR (17, 17))
{
/* Move 16-bits. */
NYI_assert (11, 11, 0);
- src_index = uimm (aarch64_get_instr (cpu), 14, 12);
- dst_index = uimm (aarch64_get_instr (cpu), 20, 18);
+ src_index = INSTR (14, 12);
+ dst_index = INSTR (20, 18);
aarch64_set_vec_u16 (cpu, vd, dst_index,
aarch64_get_vec_u16 (cpu, vs, src_index));
}
- else if (uimm (aarch64_get_instr (cpu), 18, 18))
+ else if (INSTR (18, 18))
{
/* Move 32-bits. */
NYI_assert (12, 11, 0);
- src_index = uimm (aarch64_get_instr (cpu), 14, 13);
- dst_index = uimm (aarch64_get_instr (cpu), 20, 19);
+ src_index = INSTR (14, 13);
+ dst_index = INSTR (20, 19);
aarch64_set_vec_u32 (cpu, vd, dst_index,
aarch64_get_vec_u32 (cpu, vs, src_index));
}
@@ -6417,8 +6414,8 @@ do_vec_MOV_element (sim_cpu *cpu)
{
NYI_assert (19, 19, 1);
NYI_assert (13, 11, 0);
- src_index = uimm (aarch64_get_instr (cpu), 14, 14);
- dst_index = uimm (aarch64_get_instr (cpu), 20, 20);
+ src_index = INSTR (14, 14);
+ dst_index = INSTR (20, 20);
aarch64_set_vec_u64 (cpu, vd, dst_index,
aarch64_get_vec_u64 (cpu, vs, src_index));
}
@@ -6428,47 +6425,47 @@ static void
dexAdvSIMD0 (sim_cpu *cpu)
{
/* instr [28,25] = 0 111. */
- if ( uimm (aarch64_get_instr (cpu), 15, 10) == 0x07
- && (uimm (aarch64_get_instr (cpu), 9, 5) ==
- uimm (aarch64_get_instr (cpu), 20, 16)))
+ if ( INSTR (15, 10) == 0x07
+ && (INSTR (9, 5) ==
+ INSTR (20, 16)))
{
- if (uimm (aarch64_get_instr (cpu), 31, 21) == 0x075
- || uimm (aarch64_get_instr (cpu), 31, 21) == 0x275)
+ if (INSTR (31, 21) == 0x075
+ || INSTR (31, 21) == 0x275)
{
do_vec_MOV_whole_vector (cpu);
return;
}
}
- if (uimm (aarch64_get_instr (cpu), 29, 19) == 0x1E0)
+ if (INSTR (29, 19) == 0x1E0)
{
do_vec_MOV_immediate (cpu);
return;
}
- if (uimm (aarch64_get_instr (cpu), 29, 19) == 0x5E0)
+ if (INSTR (29, 19) == 0x5E0)
{
do_vec_MVNI (cpu);
return;
}
- if (uimm (aarch64_get_instr (cpu), 29, 19) == 0x1C0
- || uimm (aarch64_get_instr (cpu), 29, 19) == 0x1C1)
+ if (INSTR (29, 19) == 0x1C0
+ || INSTR (29, 19) == 0x1C1)
{
- if (uimm (aarch64_get_instr (cpu), 15, 10) == 0x03)
+ if (INSTR (15, 10) == 0x03)
{
do_vec_DUP_scalar_into_vector (cpu);
return;
}
}
- switch (uimm (aarch64_get_instr (cpu), 29, 24))
+ switch (INSTR (29, 24))
{
case 0x0E: do_vec_op1 (cpu); return;
case 0x0F: do_vec_op2 (cpu); return;
case 0x2f:
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x01: do_vec_SSHR_USHR (cpu); return;
case 0x10:
@@ -6479,12 +6476,12 @@ dexAdvSIMD0 (sim_cpu *cpu)
}
case 0x2E:
- if (uimm (aarch64_get_instr (cpu), 21, 21) == 1)
+ if (INSTR (21, 21) == 1)
{
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x07:
- switch (uimm (aarch64_get_instr (cpu), 23, 22))
+ switch (INSTR (23, 22))
{
case 0: do_vec_EOR (cpu); return;
case 1: do_vec_BSL (cpu); return;
@@ -6507,7 +6504,7 @@ dexAdvSIMD0 (sim_cpu *cpu)
case 0x3F: do_vec_FDIV (cpu); return;
case 0x3E:
- switch (uimm (aarch64_get_instr (cpu), 20, 16))
+ switch (INSTR (20, 16))
{
case 0x00: do_vec_FNEG (cpu); return;
case 0x01: do_vec_FSQRT (cpu); return;
@@ -6531,18 +6528,18 @@ dexAdvSIMD0 (sim_cpu *cpu)
}
}
- if (uimm (aarch64_get_instr (cpu), 31, 21) == 0x370)
+ if (INSTR (31, 21) == 0x370)
{
do_vec_MOV_element (cpu);
return;
}
- switch (uimm (aarch64_get_instr (cpu), 21, 10))
+ switch (INSTR (21, 10))
{
case 0x82E: do_vec_neg (cpu); return;
case 0x87E: do_vec_sqrt (cpu); return;
default:
- if (uimm (aarch64_get_instr (cpu), 15, 10) == 0x30)
+ if (INSTR (15, 10) == 0x30)
{
do_vec_mull (cpu);
return;
@@ -6564,10 +6561,10 @@ dexAdvSIMD0 (sim_cpu *cpu)
static void
fmadds (sim_cpu *cpu)
{
- unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sa = INSTR (14, 10);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sa)
+ aarch64_get_FP_float (cpu, sn)
@@ -6578,10 +6575,10 @@ fmadds (sim_cpu *cpu)
static void
fmaddd (sim_cpu *cpu)
{
- unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sa = INSTR (14, 10);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sa)
+ aarch64_get_FP_double (cpu, sn)
@@ -6592,10 +6589,10 @@ fmaddd (sim_cpu *cpu)
static void
fmsubs (sim_cpu *cpu)
{
- unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sa = INSTR (14, 10);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sa)
- aarch64_get_FP_float (cpu, sn)
@@ -6606,10 +6603,10 @@ fmsubs (sim_cpu *cpu)
static void
fmsubd (sim_cpu *cpu)
{
- unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sa = INSTR (14, 10);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sa)
- aarch64_get_FP_double (cpu, sn)
@@ -6620,10 +6617,10 @@ fmsubd (sim_cpu *cpu)
static void
fnmadds (sim_cpu *cpu)
{
- unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sa = INSTR (14, 10);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sa)
+ (- aarch64_get_FP_float (cpu, sn))
@@ -6634,10 +6631,10 @@ fnmadds (sim_cpu *cpu)
static void
fnmaddd (sim_cpu *cpu)
{
- unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sa = INSTR (14, 10);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sa)
+ (- aarch64_get_FP_double (cpu, sn))
@@ -6648,10 +6645,10 @@ fnmaddd (sim_cpu *cpu)
static void
fnmsubs (sim_cpu *cpu)
{
- unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sa = INSTR (14, 10);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sa)
+ aarch64_get_FP_float (cpu, sn)
@@ -6662,10 +6659,10 @@ fnmsubs (sim_cpu *cpu)
static void
fnmsubd (sim_cpu *cpu)
{
- unsigned sa = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sa = INSTR (14, 10);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sa)
+ aarch64_get_FP_double (cpu, sn)
@@ -6684,11 +6681,11 @@ dexSimpleFPDataProc3Source (sim_cpu *cpu)
instr[21] ==> o1 : 0 ==> unnegated, 1 ==> negated
instr[15] ==> o2 : 0 ==> ADD, 1 ==> SUB */
- uint32_t M_S = (uimm (aarch64_get_instr (cpu), 31, 31) << 1)
- | uimm (aarch64_get_instr (cpu), 29, 29);
+ uint32_t M_S = (INSTR (31, 31) << 1)
+ | INSTR (29, 29);
/* dispatch on combined type:o1:o2. */
- uint32_t dispatch = (uimm (aarch64_get_instr (cpu), 23, 21) << 1)
- | uimm (aarch64_get_instr (cpu), 15, 15);
+ uint32_t dispatch = (INSTR (23, 21) << 1)
+ | INSTR (15, 15);
if (M_S != 0)
HALT_UNALLOC;
@@ -6760,7 +6757,7 @@ dexSimpleFPCondCompare (sim_cpu *cpu)
/* Single precision. */
float val1 = aarch64_get_vec_float (cpu, rn, 0);
float val2 = aarch64_get_vec_float (cpu, rm, 0);
-
+
/* FIXME: Check for NaNs. */
if (val1 == val2)
aarch64_set_CPSR (cpu, (Z | C));
@@ -6777,9 +6774,9 @@ dexSimpleFPCondCompare (sim_cpu *cpu)
static void
fadds (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
+ aarch64_get_FP_float (cpu, sm));
@@ -6789,9 +6786,9 @@ fadds (sim_cpu *cpu)
static void
faddd (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
+ aarch64_get_FP_double (cpu, sm));
@@ -6801,9 +6798,9 @@ faddd (sim_cpu *cpu)
static void
fdivs (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
/ aarch64_get_FP_float (cpu, sm));
@@ -6813,9 +6810,9 @@ fdivs (sim_cpu *cpu)
static void
fdivd (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
/ aarch64_get_FP_double (cpu, sm));
@@ -6825,9 +6822,9 @@ fdivd (sim_cpu *cpu)
static void
fmuls (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
* aarch64_get_FP_float (cpu, sm));
@@ -6837,9 +6834,9 @@ fmuls (sim_cpu *cpu)
static void
fmuld (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
* aarch64_get_FP_double (cpu, sm));
@@ -6849,9 +6846,9 @@ fmuld (sim_cpu *cpu)
static void
fnmuls (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, - (aarch64_get_FP_float (cpu, sn)
* aarch64_get_FP_float (cpu, sm)));
@@ -6861,9 +6858,9 @@ fnmuls (sim_cpu *cpu)
static void
fnmuld (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, - (aarch64_get_FP_double (cpu, sn)
* aarch64_get_FP_double (cpu, sm)));
@@ -6873,9 +6870,9 @@ fnmuld (sim_cpu *cpu)
static void
fsubs (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_float (cpu, sd, aarch64_get_FP_float (cpu, sn)
- aarch64_get_FP_float (cpu, sm));
@@ -6885,9 +6882,9 @@ fsubs (sim_cpu *cpu)
static void
fsubd (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
aarch64_set_FP_double (cpu, sd, aarch64_get_FP_double (cpu, sn)
- aarch64_get_FP_double (cpu, sm));
@@ -6904,14 +6901,14 @@ do_FMINNM (sim_cpu *cpu)
instr[9,5] = Sn
instr[4,0] = Cpu */
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
NYI_assert (31, 23, 0x03C);
NYI_assert (15, 10, 0x1E);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
aarch64_set_FP_double (cpu, sd,
dminnm (aarch64_get_FP_double (cpu, sn),
aarch64_get_FP_double (cpu, sm)));
@@ -6932,14 +6929,14 @@ do_FMAXNM (sim_cpu *cpu)
instr[9,5] = Sn
instr[4,0] = Cpu */
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
NYI_assert (31, 23, 0x03C);
NYI_assert (15, 10, 0x1A);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
aarch64_set_FP_double (cpu, sd,
dmaxnm (aarch64_get_FP_double (cpu, sn),
aarch64_get_FP_double (cpu, sm)));
@@ -6969,11 +6966,11 @@ dexSimpleFPDataProc2Source (sim_cpu *cpu)
instr[9,5] = Vn
instr[4,0] = Vd */
- uint32_t M_S = (uimm (aarch64_get_instr (cpu), 31, 31) << 1)
- | uimm (aarch64_get_instr (cpu), 29, 29);
- uint32_t type = uimm (aarch64_get_instr (cpu), 23, 22);
+ uint32_t M_S = (INSTR (31, 31) << 1)
+ | INSTR (29, 29);
+ uint32_t type = INSTR (23, 22);
/* Dispatch on opcode. */
- uint32_t dispatch = uimm (aarch64_get_instr (cpu), 15, 12);
+ uint32_t dispatch = INSTR (15, 12);
if (type > 1)
HALT_UNALLOC;
@@ -7032,15 +7029,15 @@ dexSimpleFPCondSelect (sim_cpu *cpu)
instr[11,10] = 11
instr[9,5] = Sn
instr[4,0] = Cpu */
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
- uint32_t set = testConditionCode (cpu, uimm (aarch64_get_instr (cpu), 15, 12));
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
+ unsigned sd = INSTR ( 4, 0);
+ uint32_t set = testConditionCode (cpu, INSTR (15, 12));
NYI_assert (31, 23, 0x03C);
NYI_assert (11, 10, 0x3);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
aarch64_set_FP_double (cpu, sd, set ? sn : sm);
else
aarch64_set_FP_float (cpu, sd, set ? sn : sm);
@@ -7050,8 +7047,8 @@ dexSimpleFPCondSelect (sim_cpu *cpu)
static void
fsturs (sim_cpu *cpu, int32_t offset)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_mem_u32 (cpu, aarch64_get_reg_u64 (cpu, st, 1) + offset,
aarch64_get_vec_u32 (cpu, rn, 0));
@@ -7061,8 +7058,8 @@ fsturs (sim_cpu *cpu, int32_t offset)
static void
fsturd (sim_cpu *cpu, int32_t offset)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_mem_u64 (cpu, aarch64_get_reg_u64 (cpu, st, 1) + offset,
aarch64_get_vec_u64 (cpu, rn, 0));
@@ -7072,8 +7069,8 @@ fsturd (sim_cpu *cpu, int32_t offset)
static void
fsturq (sim_cpu *cpu, int32_t offset)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
FRegister a;
aarch64_get_FP_long_double (cpu, rn, & a);
@@ -7088,8 +7085,8 @@ fsturq (sim_cpu *cpu, int32_t offset)
static void
ffmovs (sim_cpu *cpu)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_FP_float (cpu, st, aarch64_get_FP_float (cpu, rn));
}
@@ -7098,8 +7095,8 @@ ffmovs (sim_cpu *cpu)
static void
ffmovd (sim_cpu *cpu)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_FP_double (cpu, st, aarch64_get_FP_double (cpu, rn));
}
@@ -7108,8 +7105,8 @@ ffmovd (sim_cpu *cpu)
static void
fgmovs (sim_cpu *cpu)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_reg_u32 (cpu, rn, NO_SP));
}
@@ -7118,8 +7115,8 @@ fgmovs (sim_cpu *cpu)
static void
fgmovd (sim_cpu *cpu)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_reg_u64 (cpu, rn, NO_SP));
}
@@ -7128,8 +7125,8 @@ fgmovd (sim_cpu *cpu)
static void
gfmovs (sim_cpu *cpu)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, st, NO_SP, aarch64_get_vec_u32 (cpu, rn, 0));
}
@@ -7138,8 +7135,8 @@ gfmovs (sim_cpu *cpu)
static void
gfmovd (sim_cpu *cpu)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, st, NO_SP, aarch64_get_vec_u64 (cpu, rn, 0));
}
@@ -7153,8 +7150,8 @@ gfmovd (sim_cpu *cpu)
static void
fmovs (sim_cpu *cpu)
{
- unsigned int sd = uimm (aarch64_get_instr (cpu), 4, 0);
- uint32_t imm = uimm (aarch64_get_instr (cpu), 20, 13);
+ unsigned int sd = INSTR (4, 0);
+ uint32_t imm = INSTR (20, 13);
float f = fp_immediate_for_encoding_32 (imm);
aarch64_set_FP_float (cpu, sd, f);
@@ -7163,8 +7160,8 @@ fmovs (sim_cpu *cpu)
static void
fmovd (sim_cpu *cpu)
{
- unsigned int sd = uimm (aarch64_get_instr (cpu), 4, 0);
- uint32_t imm = uimm (aarch64_get_instr (cpu), 20, 13);
+ unsigned int sd = INSTR (4, 0);
+ uint32_t imm = INSTR (20, 13);
double d = fp_immediate_for_encoding_64 (imm);
aarch64_set_FP_double (cpu, sd, d);
@@ -7180,14 +7177,14 @@ dexSimpleFPImmediate (sim_cpu *cpu)
instr[12,10] == 100
instr[9,5] == imm5 : 00000 ==> PK, ow ==> UNALLOC
instr[4,0] == Rd */
- uint32_t imm5 = uimm (aarch64_get_instr (cpu), 9, 5);
+ uint32_t imm5 = INSTR (9, 5);
NYI_assert (31, 23, 0x3C);
if (imm5 != 0)
HALT_UNALLOC;
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
fmovd (cpu);
else
fmovs (cpu);
@@ -7204,8 +7201,8 @@ dexSimpleFPImmediate (sim_cpu *cpu)
static void
fldurs (sim_cpu *cpu, int32_t offset)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_vec_u32 (cpu, st, 0, aarch64_get_mem_u32
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset));
@@ -7215,8 +7212,8 @@ fldurs (sim_cpu *cpu, int32_t offset)
static void
fldurd (sim_cpu *cpu, int32_t offset)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
aarch64_set_vec_u64 (cpu, st, 0, aarch64_get_mem_u64
(cpu, aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset));
@@ -7226,8 +7223,8 @@ fldurd (sim_cpu *cpu, int32_t offset)
static void
fldurq (sim_cpu *cpu, int32_t offset)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int st = INSTR (4, 0);
FRegister a;
uint64_t addr = aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset;
@@ -7245,8 +7242,8 @@ fldurq (sim_cpu *cpu, int32_t offset)
static void
fabss (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
float value = aarch64_get_FP_float (cpu, sn);
aarch64_set_FP_float (cpu, sd, fabsf (value));
@@ -7256,8 +7253,8 @@ fabss (sim_cpu *cpu)
static void
fabcpu (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
double value = aarch64_get_FP_double (cpu, sn);
aarch64_set_FP_double (cpu, sd, fabs (value));
@@ -7267,8 +7264,8 @@ fabcpu (sim_cpu *cpu)
static void
fnegs (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_float (cpu, sd, - aarch64_get_FP_float (cpu, sn));
}
@@ -7277,8 +7274,8 @@ fnegs (sim_cpu *cpu)
static void
fnegd (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_double (cpu, sd, - aarch64_get_FP_double (cpu, sn));
}
@@ -7287,8 +7284,8 @@ fnegd (sim_cpu *cpu)
static void
fsqrts (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_float (cpu, sd, sqrt (aarch64_get_FP_float (cpu, sn)));
}
@@ -7297,8 +7294,8 @@ fsqrts (sim_cpu *cpu)
static void
fsqrtd (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_double (cpu, sd,
sqrt (aarch64_get_FP_double (cpu, sn)));
@@ -7308,8 +7305,8 @@ fsqrtd (sim_cpu *cpu)
static void
fcvtds (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_float (cpu, sd, (float) aarch64_get_FP_double (cpu, sn));
}
@@ -7318,8 +7315,8 @@ fcvtds (sim_cpu *cpu)
static void
fcvtcpu (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_double (cpu, sd, (double) aarch64_get_FP_float (cpu, sn));
}
@@ -7336,9 +7333,9 @@ do_FRINT (sim_cpu *cpu)
instr[4,0] = dest */
float val;
- unsigned rs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned int rmode = uimm (aarch64_get_instr (cpu), 17, 15);
+ unsigned rs = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned int rmode = INSTR (17, 15);
NYI_assert (31, 23, 0x03C);
NYI_assert (21, 18, 0x9);
@@ -7348,7 +7345,7 @@ do_FRINT (sim_cpu *cpu)
/* FIXME: Add support for rmode == 6 exactness check. */
rmode = uimm (aarch64_get_FPSR (cpu), 23, 22);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
double val = aarch64_get_FP_double (cpu, rs);
@@ -7450,7 +7447,7 @@ do_FRINT (sim_cpu *cpu)
/* Convert half to float. */
static void
-do_FCVT_half_to_single (sim_cpu * cpu)
+do_FCVT_half_to_single (sim_cpu *cpu)
{
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
@@ -7462,18 +7459,18 @@ do_FCVT_half_to_single (sim_cpu * cpu)
/* Convert half to float. */
static void
-do_FCVT_half_to_double (sim_cpu * cpu)
+do_FCVT_half_to_double (sim_cpu *cpu)
{
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
NYI_assert (31, 10, 0x7B8B0);
-
+
aarch64_set_FP_double (cpu, rd, (double) aarch64_get_FP_half (cpu, rn));
}
static void
-do_FCVT_single_to_half (sim_cpu * cpu)
+do_FCVT_single_to_half (sim_cpu *cpu)
{
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
@@ -7485,13 +7482,13 @@ do_FCVT_single_to_half (sim_cpu * cpu)
/* Convert half to float. */
static void
-do_FCVT_double_to_half (sim_cpu * cpu)
+do_FCVT_double_to_half (sim_cpu *cpu)
{
unsigned rn = INSTR (9, 5);
unsigned rd = INSTR (4, 0);
NYI_assert (31, 10, 0x798F0);
-
+
aarch64_set_FP_half (cpu, rd, (float) aarch64_get_FP_double (cpu, rn));
}
@@ -7522,10 +7519,10 @@ dexSimpleFPDataProc1Source (sim_cpu *cpu)
000101 ==> FCVT (half-to-double)
instr[14,10] = 10000. */
- uint32_t M_S = (uimm (aarch64_get_instr (cpu), 31, 31) << 1)
- | uimm (aarch64_get_instr (cpu), 29, 29);
- uint32_t type = uimm (aarch64_get_instr (cpu), 23, 22);
- uint32_t opcode = uimm (aarch64_get_instr (cpu), 20, 15);
+ uint32_t M_S = (INSTR (31, 31) << 1)
+ | INSTR (29, 29);
+ uint32_t type = INSTR (23, 22);
+ uint32_t opcode = INSTR (20, 15);
if (M_S != 0)
HALT_UNALLOC;
@@ -7616,8 +7613,8 @@ dexSimpleFPDataProc1Source (sim_cpu *cpu)
static void
scvtf32 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_float
(cpu, sd, (float) aarch64_get_reg_s32 (cpu, rn, NO_SP));
@@ -7627,8 +7624,8 @@ scvtf32 (sim_cpu *cpu)
static void
scvtf (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_float
(cpu, sd, (float) aarch64_get_reg_s64 (cpu, rn, NO_SP));
@@ -7638,8 +7635,8 @@ scvtf (sim_cpu *cpu)
static void
scvtd32 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_double
(cpu, sd, (double) aarch64_get_reg_s32 (cpu, rn, NO_SP));
@@ -7649,8 +7646,8 @@ scvtd32 (sim_cpu *cpu)
static void
scvtd (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned sd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned sd = INSTR (4, 0);
aarch64_set_FP_double
(cpu, sd, (double) aarch64_get_reg_s64 (cpu, rn, NO_SP));
@@ -7714,8 +7711,8 @@ static const double DOUBLE_LONG_MIN = (double) LONG_MIN;
static void
fcvtszs32 (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* TODO : check that this rounds toward zero. */
float f = aarch64_get_FP_float (cpu, sn);
int32_t value = (int32_t) f;
@@ -7730,8 +7727,8 @@ fcvtszs32 (sim_cpu *cpu)
static void
fcvtszs (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
float f = aarch64_get_FP_float (cpu, sn);
int64_t value = (int64_t) f;
@@ -7744,8 +7741,8 @@ fcvtszs (sim_cpu *cpu)
static void
fcvtszd32 (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* TODO : check that this rounds toward zero. */
double d = aarch64_get_FP_double (cpu, sn);
int32_t value = (int32_t) d;
@@ -7760,8 +7757,8 @@ fcvtszd32 (sim_cpu *cpu)
static void
fcvtszd (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* TODO : check that this rounds toward zero. */
double d = aarch64_get_FP_double (cpu, sn);
int64_t value;
@@ -7785,20 +7782,20 @@ do_fcvtzu (sim_cpu *cpu)
instr[9,5] = Rs
instr[4,0] = Rd. */
- unsigned rs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rs = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
NYI_assert (30, 23, 0x3C);
NYI_assert (20, 16, 0x19);
- if (uimm (aarch64_get_instr (cpu), 21, 21) != 1)
+ if (INSTR (21, 21) != 1)
/* Convert to fixed point. */
HALT_NYI;
- if (uimm (aarch64_get_instr (cpu), 31, 31))
+ if (INSTR (31, 31))
{
/* Convert to unsigned 64-bit integer. */
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
double d = aarch64_get_FP_double (cpu, rs);
uint64_t value = (uint64_t) d;
@@ -7826,7 +7823,7 @@ do_fcvtzu (sim_cpu *cpu)
uint32_t value;
/* Convert to unsigned 32-bit integer. */
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
double d = aarch64_get_FP_double (cpu, rs);
@@ -7861,21 +7858,21 @@ do_UCVTF (sim_cpu *cpu)
instr[9,5] = Rs
instr[4,0] = Rd. */
- unsigned rs = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rs = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
NYI_assert (30, 23, 0x3C);
NYI_assert (20, 16, 0x03);
- if (uimm (aarch64_get_instr (cpu), 21, 21) != 1)
+ if (INSTR (21, 21) != 1)
HALT_NYI;
/* FIXME: Add exception raising. */
- if (uimm (aarch64_get_instr (cpu), 31, 31))
+ if (INSTR (31, 31))
{
uint64_t value = aarch64_get_reg_u64 (cpu, rs, NO_SP);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
aarch64_set_FP_double (cpu, rd, (double) value);
else
aarch64_set_FP_float (cpu, rd, (float) value);
@@ -7884,7 +7881,7 @@ do_UCVTF (sim_cpu *cpu)
{
uint32_t value = aarch64_get_reg_u32 (cpu, rs, NO_SP);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
aarch64_set_FP_double (cpu, rd, (double) value);
else
aarch64_set_FP_float (cpu, rd, (float) value);
@@ -7900,15 +7897,15 @@ float_vector_move (sim_cpu *cpu)
instr[9,5] ==> source
instr[4,0] ==> dest. */
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
NYI_assert (31, 17, 0x4F57);
- if (uimm (aarch64_get_instr (cpu), 15, 10) != 0)
+ if (INSTR (15, 10) != 0)
HALT_UNALLOC;
- if (uimm (aarch64_get_instr (cpu), 16, 16))
+ if (INSTR (16, 16))
aarch64_set_vec_u64 (cpu, rd, 1, aarch64_get_reg_u64 (cpu, rn, NO_SP));
else
aarch64_set_reg_u64 (cpu, rd, NO_SP, aarch64_get_vec_u64 (cpu, rn, 1));
@@ -7934,22 +7931,22 @@ dexSimpleFPIntegerConvert (sim_cpu *cpu)
uint32_t size;
uint32_t S;
- if (uimm (aarch64_get_instr (cpu), 31, 17) == 0x4F57)
+ if (INSTR (31, 17) == 0x4F57)
{
float_vector_move (cpu);
return;
}
- size = uimm (aarch64_get_instr (cpu), 31, 31);
- S = uimm (aarch64_get_instr (cpu), 29, 29);
+ size = INSTR (31, 31);
+ S = INSTR (29, 29);
if (S != 0)
HALT_UNALLOC;
- type = uimm (aarch64_get_instr (cpu), 23, 22);
+ type = INSTR (23, 22);
if (type > 1)
HALT_UNALLOC;
- rmode_opcode = uimm (aarch64_get_instr (cpu), 20, 16);
+ rmode_opcode = INSTR (20, 16);
size_type = (size << 1) | type; /* 0==32f, 1==32d, 2==64f, 3==64d. */
switch (rmode_opcode)
@@ -8029,8 +8026,8 @@ set_flags_for_float_compare (sim_cpu *cpu, float fvalue1, float fvalue2)
static void
fcmps (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
float fvalue1 = aarch64_get_FP_float (cpu, sn);
float fvalue2 = aarch64_get_FP_float (cpu, sm);
@@ -8043,7 +8040,7 @@ fcmps (sim_cpu *cpu)
static void
fcmpzs (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned sn = INSTR ( 9, 5);
float fvalue1 = aarch64_get_FP_float (cpu, sn);
set_flags_for_float_compare (cpu, fvalue1, 0.0f);
@@ -8053,8 +8050,8 @@ fcmpzs (sim_cpu *cpu)
static void
fcmpes (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
float fvalue1 = aarch64_get_FP_float (cpu, sn);
float fvalue2 = aarch64_get_FP_float (cpu, sm);
@@ -8066,7 +8063,7 @@ fcmpes (sim_cpu *cpu)
static void
fcmpzes (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned sn = INSTR ( 9, 5);
float fvalue1 = aarch64_get_FP_float (cpu, sn);
set_flags_for_float_compare (cpu, fvalue1, 0.0f);
@@ -8098,8 +8095,8 @@ set_flags_for_double_compare (sim_cpu *cpu, double dval1, double dval2)
static void
fcmpd (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
double dvalue1 = aarch64_get_FP_double (cpu, sn);
double dvalue2 = aarch64_get_FP_double (cpu, sm);
@@ -8112,7 +8109,7 @@ fcmpd (sim_cpu *cpu)
static void
fcmpzd (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned sn = INSTR ( 9, 5);
double dvalue1 = aarch64_get_FP_double (cpu, sn);
set_flags_for_double_compare (cpu, dvalue1, 0.0);
@@ -8122,8 +8119,8 @@ fcmpzd (sim_cpu *cpu)
static void
fcmped (sim_cpu *cpu)
{
- unsigned sm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned sm = INSTR (20, 16);
+ unsigned sn = INSTR ( 9, 5);
double dvalue1 = aarch64_get_FP_double (cpu, sn);
double dvalue2 = aarch64_get_FP_double (cpu, sm);
@@ -8135,7 +8132,7 @@ fcmped (sim_cpu *cpu)
static void
fcmpzed (sim_cpu *cpu)
{
- unsigned sn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned sn = INSTR ( 9, 5);
double dvalue1 = aarch64_get_FP_double (cpu, sn);
set_flags_for_double_compare (cpu, dvalue1, 0.0);
@@ -8154,11 +8151,11 @@ dexSimpleFPCompare (sim_cpu *cpu)
01000 ==> FCMPZ, 11000 ==> FCMPEZ,
ow ==> UNALLOC */
uint32_t dispatch;
- uint32_t M_S = (uimm (aarch64_get_instr (cpu), 31, 31) << 1)
- | uimm (aarch64_get_instr (cpu), 29, 29);
- uint32_t type = uimm (aarch64_get_instr (cpu), 23, 22);
- uint32_t op = uimm (aarch64_get_instr (cpu), 15, 14);
- uint32_t op2_2_0 = uimm (aarch64_get_instr (cpu), 2, 0);
+ uint32_t M_S = (INSTR (31, 31) << 1)
+ | INSTR (29, 29);
+ uint32_t type = INSTR (23, 22);
+ uint32_t op = INSTR (15, 14);
+ uint32_t op2_2_0 = INSTR (2, 0);
if (op2_2_0 != 0)
HALT_UNALLOC;
@@ -8173,7 +8170,7 @@ dexSimpleFPCompare (sim_cpu *cpu)
HALT_UNALLOC;
/* dispatch on type and top 2 bits of opcode. */
- dispatch = (type << 2) | uimm (aarch64_get_instr (cpu), 4, 3);
+ dispatch = (type << 2) | INSTR (4, 3);
switch (dispatch)
{
@@ -8197,13 +8194,13 @@ do_scalar_FADDP (sim_cpu *cpu)
instr [9,5] = Fn
instr [4,0] = Fd. */
- unsigned Fn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned Fd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned Fn = INSTR (9, 5);
+ unsigned Fd = INSTR (4, 0);
NYI_assert (31, 23, 0x0FC);
NYI_assert (21, 10, 0xC36);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
double val1 = aarch64_get_vec_double (cpu, Fn, 0);
double val2 = aarch64_get_vec_double (cpu, Fn, 1);
@@ -8232,15 +8229,15 @@ do_scalar_FABD (sim_cpu *cpu)
instr [9, 5] = Rn
instr [4, 0] = Rd. */
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
NYI_assert (31, 23, 0x0FD);
NYI_assert (21, 21, 1);
NYI_assert (15, 10, 0x35);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
aarch64_set_FP_double (cpu, rd,
fabs (aarch64_get_FP_double (cpu, rn)
- aarch64_get_FP_double (cpu, rm)));
@@ -8259,9 +8256,9 @@ do_scalar_CMGT (sim_cpu *cpu)
instr [9, 5] = Rn
instr [4, 0] = Rd. */
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
NYI_assert (31, 21, 0x2F7);
NYI_assert (15, 10, 0x0D);
@@ -8313,7 +8310,7 @@ do_scalar_SSHL (sim_cpu *cpu)
aarch64_get_vec_s64 (cpu, rn, 0) << shift);
else
aarch64_set_vec_s64 (cpu, rd, 0,
- aarch64_get_vec_s64 (cpu, rn, 0) >> - shift);
+ aarch64_get_vec_s64 (cpu, rn, 0) >> - shift);
}
static void
@@ -8369,12 +8366,12 @@ do_scalar_FCM (sim_cpu *cpu)
instr [9, 5] = Rn
instr [4, 0] = Rd. */
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned EUac = (uimm (aarch64_get_instr (cpu), 23, 23) << 2)
- | (uimm (aarch64_get_instr (cpu), 29, 29) << 1)
- | uimm (aarch64_get_instr (cpu), 11, 11);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+ unsigned EUac = (INSTR (23, 23) << 2)
+ | (INSTR (29, 29) << 1)
+ | INSTR (11, 11);
unsigned result;
float val1;
float val2;
@@ -8385,7 +8382,7 @@ do_scalar_FCM (sim_cpu *cpu)
NYI_assert (15, 12, 0xE);
NYI_assert (10, 10, 1);
- if (uimm (aarch64_get_instr (cpu), 22, 22))
+ if (INSTR (22, 22))
{
double val1 = aarch64_get_FP_double (cpu, rn);
double val2 = aarch64_get_FP_double (cpu, rm);
@@ -8462,38 +8459,38 @@ do_scalar_MOV (sim_cpu *cpu)
instr [9, 5] = Rn
instr [4, 0] = Rd. */
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
unsigned index;
NYI_assert (31, 21, 0x2F0);
NYI_assert (15, 10, 0x01);
- if (uimm (aarch64_get_instr (cpu), 16, 16))
+ if (INSTR (16, 16))
{
/* 8-bit. */
- index = uimm (aarch64_get_instr (cpu), 20, 17);
+ index = INSTR (20, 17);
aarch64_set_vec_u8
(cpu, rd, 0, aarch64_get_vec_u8 (cpu, rn, index));
}
- else if (uimm (aarch64_get_instr (cpu), 17, 17))
+ else if (INSTR (17, 17))
{
/* 16-bit. */
- index = uimm (aarch64_get_instr (cpu), 20, 18);
+ index = INSTR (20, 18);
aarch64_set_vec_u16
(cpu, rd, 0, aarch64_get_vec_u16 (cpu, rn, index));
}
- else if (uimm (aarch64_get_instr (cpu), 18, 18))
+ else if (INSTR (18, 18))
{
/* 32-bit. */
- index = uimm (aarch64_get_instr (cpu), 20, 19);
+ index = INSTR (20, 19);
aarch64_set_vec_u32
(cpu, rd, 0, aarch64_get_vec_u32 (cpu, rn, index));
}
- else if (uimm (aarch64_get_instr (cpu), 19, 19))
+ else if (INSTR (19, 19))
{
/* 64-bit. */
- index = uimm (aarch64_get_instr (cpu), 20, 20);
+ index = INSTR (20, 20);
aarch64_set_vec_u64
(cpu, rd, 0, aarch64_get_vec_u64 (cpu, rn, index));
}
@@ -8508,8 +8505,8 @@ do_scalar_NEG (sim_cpu *cpu)
instr [9, 5] = Rn
instr [4, 0] = Rd. */
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
NYI_assert (31, 10, 0x1FB82E);
@@ -8571,10 +8568,10 @@ do_scalar_vec (sim_cpu *cpu)
{
/* instr [30] = 1. */
/* instr [28,25] = 1111. */
- switch (uimm (aarch64_get_instr (cpu), 31, 23))
+ switch (INSTR (31, 23))
{
case 0xBC:
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x01: do_scalar_MOV (cpu); return;
case 0x39: do_scalar_FCM (cpu); return;
@@ -8585,7 +8582,7 @@ do_scalar_vec (sim_cpu *cpu)
case 0xBE: do_scalar_shift (cpu); return;
case 0xFC:
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x36: do_scalar_FADDP (cpu); return;
case 0x39: do_scalar_FCM (cpu); return;
@@ -8594,7 +8591,7 @@ do_scalar_vec (sim_cpu *cpu)
break;
case 0xFD:
- switch (uimm (aarch64_get_instr (cpu), 15, 10))
+ switch (INSTR (15, 10))
{
case 0x0D: do_scalar_CMGT (cpu); return;
case 0x11: do_scalar_USHL (cpu); return;
@@ -8616,7 +8613,7 @@ do_scalar_vec (sim_cpu *cpu)
default:
HALT_NYI;
}
-
+
default:
HALT_NYI;
}
@@ -8629,23 +8626,23 @@ dexAdvSIMD1 (sim_cpu *cpu)
/* We are currently only interested in the basic
scalar fp routines which all have bit 30 = 0. */
- if (uimm (aarch64_get_instr (cpu), 30, 30))
+ if (INSTR (30, 30))
do_scalar_vec (cpu);
/* instr[24] is set for FP data processing 3-source and clear for
all other basic scalar fp instruction groups. */
- else if (uimm (aarch64_get_instr (cpu), 24, 24))
+ else if (INSTR (24, 24))
dexSimpleFPDataProc3Source (cpu);
/* instr[21] is clear for floating <-> fixed conversions and set for
all other basic scalar fp instruction groups. */
- else if (!uimm (aarch64_get_instr (cpu), 21, 21))
+ else if (!INSTR (21, 21))
dexSimpleFPFixedConvert (cpu);
/* instr[11,10] : 01 ==> cond compare, 10 ==> Data Proc 2 Source
11 ==> cond select, 00 ==> other. */
else
- switch (uimm (aarch64_get_instr (cpu), 11, 10))
+ switch (INSTR (11, 10))
{
case 1: dexSimpleFPCondCompare (cpu); return;
case 2: dexSimpleFPDataProc2Source (cpu); return;
@@ -8653,20 +8650,20 @@ dexAdvSIMD1 (sim_cpu *cpu)
default:
/* Now an ordered cascade of tests.
- FP immediate has aarch64_get_instr (cpu)[12] == 1.
- FP compare has aarch64_get_instr (cpu)[13] == 1.
- FP Data Proc 1 Source has aarch64_get_instr (cpu)[14] == 1.
- FP floating <--> integer conversions has aarch64_get_instr (cpu)[15] == 0. */
- if (uimm (aarch64_get_instr (cpu), 12, 12))
+ FP immediate has instr [12] == 1.
+ FP compare has instr [13] == 1.
+ FP Data Proc 1 Source has instr [14] == 1.
+ FP floating <--> integer conversions has instr [15] == 0. */
+ if (INSTR (12, 12))
dexSimpleFPImmediate (cpu);
- else if (uimm (aarch64_get_instr (cpu), 13, 13))
+ else if (INSTR (13, 13))
dexSimpleFPCompare (cpu);
- else if (uimm (aarch64_get_instr (cpu), 14, 14))
+ else if (INSTR (14, 14))
dexSimpleFPDataProc1Source (cpu);
- else if (!uimm (aarch64_get_instr (cpu), 15, 15))
+ else if (!INSTR (15, 15))
dexSimpleFPIntegerConvert (cpu);
else
@@ -8684,14 +8681,14 @@ pcadr (sim_cpu *cpu)
instr[30,29] = immlo
instr[23,5] = immhi. */
uint64_t address;
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- uint32_t isPage = uimm (aarch64_get_instr (cpu), 31, 31);
+ unsigned rd = INSTR (4, 0);
+ uint32_t isPage = INSTR (31, 31);
union { int64_t u64; uint64_t s64; } imm;
uint64_t offset;
imm.s64 = simm64 (aarch64_get_instr (cpu), 23, 5);
offset = imm.u64;
- offset = (offset << 2) | uimm (aarch64_get_instr (cpu), 30, 29);
+ offset = (offset << 2) | INSTR (30, 29);
address = aarch64_get_PC (cpu);
@@ -8727,8 +8724,8 @@ dexPCRelAddressing (sim_cpu *cpu)
static void
and32 (sim_cpu *cpu, uint32_t bimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, NO_SP) & bimm);
@@ -8738,8 +8735,8 @@ and32 (sim_cpu *cpu, uint32_t bimm)
static void
and64 (sim_cpu *cpu, uint64_t bimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, NO_SP) & bimm);
@@ -8749,8 +8746,8 @@ and64 (sim_cpu *cpu, uint64_t bimm)
static void
ands32 (sim_cpu *cpu, uint32_t bimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t value2 = bimm;
@@ -8763,8 +8760,8 @@ ands32 (sim_cpu *cpu, uint32_t bimm)
static void
ands64 (sim_cpu *cpu, uint64_t bimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t value2 = bimm;
@@ -8777,8 +8774,8 @@ ands64 (sim_cpu *cpu, uint64_t bimm)
static void
eor32 (sim_cpu *cpu, uint32_t bimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, NO_SP) ^ bimm);
@@ -8788,8 +8785,8 @@ eor32 (sim_cpu *cpu, uint32_t bimm)
static void
eor64 (sim_cpu *cpu, uint64_t bimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, NO_SP) ^ bimm);
@@ -8799,8 +8796,8 @@ eor64 (sim_cpu *cpu, uint64_t bimm)
static void
orr32 (sim_cpu *cpu, uint32_t bimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u32 (cpu, rn, NO_SP) | bimm);
@@ -8810,8 +8807,8 @@ orr32 (sim_cpu *cpu, uint32_t bimm)
static void
orr64 (sim_cpu *cpu, uint64_t bimm)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, SP_OK,
aarch64_get_reg_u64 (cpu, rn, NO_SP) | bimm);
@@ -8826,9 +8823,9 @@ orr64 (sim_cpu *cpu, uint64_t bimm)
static void
and32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -8839,9 +8836,9 @@ and32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
and64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -8852,9 +8849,9 @@ and64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
ands32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t value2 = shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -8868,9 +8865,9 @@ ands32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
ands64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t value2 = shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
@@ -8884,9 +8881,9 @@ ands64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
bic32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -8897,9 +8894,9 @@ bic32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
bic64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -8910,9 +8907,9 @@ bic64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
bics32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value1 = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t value2 = ~ shifted32 (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -8926,9 +8923,9 @@ bics32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
bics64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t value2 = ~ shifted64 (aarch64_get_reg_u64 (cpu, rm, NO_SP),
@@ -8942,9 +8939,9 @@ bics64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
eon32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -8955,9 +8952,9 @@ eon32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
eon64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -8968,9 +8965,9 @@ eon64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
eor32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -8981,9 +8978,9 @@ eor32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
eor64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -8994,9 +8991,9 @@ eor64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
orr32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -9007,9 +9004,9 @@ orr32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
orr64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -9020,9 +9017,9 @@ orr64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
orn32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u32 (cpu, rn, NO_SP)
@@ -9033,9 +9030,9 @@ orn32_shift (sim_cpu *cpu, Shift shift, uint32_t count)
static void
orn64_shift (sim_cpu *cpu, Shift shift, uint32_t count)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, aarch64_get_reg_u64 (cpu, rn, NO_SP)
@@ -9055,13 +9052,13 @@ dexLogicalImmediate (sim_cpu *cpu)
instr[4,0] = Rd */
/* 32 bit operations must have N = 0 or else we have an UNALLOC. */
- uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31);
- uint32_t N = uimm (aarch64_get_instr (cpu), 22, 22);
- /* uint32_t immr = uimm (aarch64_get_instr (cpu), 21, 16);. */
- /* uint32_t imms = uimm (aarch64_get_instr (cpu), 15, 10);. */
- uint32_t index = uimm (aarch64_get_instr (cpu), 22, 10);
+ uint32_t size = INSTR (31, 31);
+ uint32_t N = INSTR (22, 22);
+ /* uint32_t immr = INSTR (21, 16);. */
+ /* uint32_t imms = INSTR (15, 10);. */
+ uint32_t index = INSTR (22, 10);
uint64_t bimm64 = LITable [index];
- uint32_t dispatch = uimm (aarch64_get_instr (cpu), 30, 29);
+ uint32_t dispatch = INSTR (30, 29);
if (~size & N)
HALT_UNALLOC;
@@ -9106,7 +9103,7 @@ dexLogicalImmediate (sim_cpu *cpu)
static void
movz32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, val << (pos * 16));
}
@@ -9115,7 +9112,7 @@ movz32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
static void
movz64 (sim_cpu *cpu, uint32_t val, uint32_t pos)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, ((uint64_t) val) << (pos * 16));
}
@@ -9124,7 +9121,7 @@ movz64 (sim_cpu *cpu, uint32_t val, uint32_t pos)
static void
movn32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, ((val << (pos * 16)) ^ 0xffffffffU));
}
@@ -9133,7 +9130,7 @@ movn32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
static void
movn64 (sim_cpu *cpu, uint32_t val, uint32_t pos)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, ((((uint64_t) val) << (pos * 16))
@@ -9144,7 +9141,7 @@ movn64 (sim_cpu *cpu, uint32_t val, uint32_t pos)
static void
movk32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
uint32_t current = aarch64_get_reg_u32 (cpu, rd, NO_SP);
uint32_t value = val << (pos * 16);
uint32_t mask = ~(0xffffU << (pos * 16));
@@ -9156,7 +9153,7 @@ movk32 (sim_cpu *cpu, uint32_t val, uint32_t pos)
static void
movk64 (sim_cpu *cpu, uint32_t val, uint32_t pos)
{
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rd = INSTR (4, 0);
uint64_t current = aarch64_get_reg_u64 (cpu, rd, NO_SP);
uint64_t value = (uint64_t) val << (pos * 16);
uint64_t mask = ~(0xffffULL << (pos * 16));
@@ -9178,9 +9175,9 @@ dexMoveWideImmediate (sim_cpu *cpu)
we just pass the multiplier. */
uint32_t imm;
- uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31);
- uint32_t op = uimm (aarch64_get_instr (cpu), 30, 29);
- uint32_t shift = uimm (aarch64_get_instr (cpu), 22, 21);
+ uint32_t size = INSTR (31, 31);
+ uint32_t op = INSTR (30, 29);
+ uint32_t shift = INSTR (22, 21);
/* 32 bit can only shift 0 or 1 lot of 16.
anything else is an unallocated instruction. */
@@ -9190,7 +9187,7 @@ dexMoveWideImmediate (sim_cpu *cpu)
if (op == 1)
HALT_UNALLOC;
- imm = uimm (aarch64_get_instr (cpu), 20, 5);
+ imm = INSTR (20, 5);
if (size == 0)
{
@@ -9227,7 +9224,7 @@ static void
ubfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
{
unsigned rd;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
/* Pick either s+1-r or s+1 consecutive bits out of the original word. */
@@ -9252,7 +9249,7 @@ ubfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
value >>= r - (s + 1);
}
- rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
}
@@ -9262,7 +9259,7 @@ static void
ubfm (sim_cpu *cpu, uint32_t r, uint32_t s)
{
unsigned rd;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
if (r <= s)
@@ -9286,7 +9283,7 @@ ubfm (sim_cpu *cpu, uint32_t r, uint32_t s)
value >>= r - (s + 1);
}
- rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, value);
}
@@ -9302,7 +9299,7 @@ static void
sbfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
{
unsigned rd;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
/* as per ubfm32 but use an ASR instead of an LSR. */
int32_t value = aarch64_get_reg_s32 (cpu, rn, NO_SP);
@@ -9317,7 +9314,7 @@ sbfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
value >>= r - (s + 1);
}
- rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP, (uint32_t) value);
}
@@ -9327,7 +9324,7 @@ static void
sbfm (sim_cpu *cpu, uint32_t r, uint32_t s)
{
unsigned rd;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
/* acpu per ubfm but use an ASR instead of an LSR. */
int64_t value = aarch64_get_reg_s64 (cpu, rn, NO_SP);
@@ -9342,7 +9339,7 @@ sbfm (sim_cpu *cpu, uint32_t r, uint32_t s)
value >>= r - (s + 1);
}
- rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ rd = INSTR (4, 0);
aarch64_set_reg_s64 (cpu, rd, NO_SP, value);
}
@@ -9356,7 +9353,7 @@ sbfm (sim_cpu *cpu, uint32_t r, uint32_t s)
static void
bfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t mask = -1;
unsigned rd;
@@ -9390,7 +9387,7 @@ bfm32 (sim_cpu *cpu, uint32_t r, uint32_t s)
mask >>= r - (s + 1);
}
- rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ rd = INSTR (4, 0);
value2 = aarch64_get_reg_u32 (cpu, rd, NO_SP);
value2 &= ~mask;
@@ -9406,7 +9403,7 @@ static void
bfm (sim_cpu *cpu, uint32_t r, uint32_t s)
{
unsigned rd;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t mask = 0xffffffffffffffffULL;
@@ -9437,7 +9434,7 @@ bfm (sim_cpu *cpu, uint32_t r, uint32_t s)
mask >>= r - (s + 1);
}
- rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP, (aarch64_get_reg_u64 (cpu, rd, NO_SP) & ~mask) | value);
}
@@ -9457,11 +9454,11 @@ dexBitfieldImmediate (sim_cpu *cpu)
/* 32 bit operations must have N = 0 or else we have an UNALLOC. */
uint32_t dispatch;
uint32_t imms;
- uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31);
- uint32_t N = uimm (aarch64_get_instr (cpu), 22, 22);
+ uint32_t size = INSTR (31, 31);
+ uint32_t N = INSTR (22, 22);
/* 32 bit operations must have immr[5] = 0 and imms[5] = 0. */
/* or else we have an UNALLOC. */
- uint32_t immr = uimm (aarch64_get_instr (cpu), 21, 16);
+ uint32_t immr = INSTR (21, 16);
if (~size & N)
HALT_UNALLOC;
@@ -9469,12 +9466,12 @@ dexBitfieldImmediate (sim_cpu *cpu)
if (!size && uimm (immr, 5, 5))
HALT_UNALLOC;
- imms = uimm (aarch64_get_instr (cpu), 15, 10);
+ imms = INSTR (15, 10);
if (!size && uimm (imms, 5, 5))
HALT_UNALLOC;
/* Switch on combined size and op. */
- dispatch = uimm (aarch64_get_instr (cpu), 31, 29);
+ dispatch = INSTR (31, 29);
switch (dispatch)
{
case 0: sbfm32 (cpu, immr, imms); return;
@@ -9495,10 +9492,10 @@ do_EXTR_32 (sim_cpu *cpu)
instr[15,10] = imms : 0xxxxx for 32 bit
instr[9,5] = Rn
instr[4,0] = Rd */
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned imms = uimm (aarch64_get_instr (cpu), 15, 10) & 31;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned imms = INSTR (15, 10) & 31;
+ unsigned rn = INSTR ( 9, 5);
+ unsigned rd = INSTR ( 4, 0);
uint64_t val1;
uint64_t val2;
@@ -9518,10 +9515,10 @@ do_EXTR_64 (sim_cpu *cpu)
instr[15,10] = imms
instr[9,5] = Rn
instr[4,0] = Rd */
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned imms = uimm (aarch64_get_instr (cpu), 15, 10) & 63;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned imms = INSTR (15, 10) & 63;
+ unsigned rn = INSTR ( 9, 5);
+ unsigned rd = INSTR ( 4, 0);
uint64_t val;
val = aarch64_get_reg_u64 (cpu, rm, NO_SP);
@@ -9547,11 +9544,11 @@ dexExtractImmediate (sim_cpu *cpu)
/* 32 bit operations must have N = 0 or else we have an UNALLOC. */
/* 64 bit operations must have N = 1 or else we have an UNALLOC. */
uint32_t dispatch;
- uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31);
- uint32_t N = uimm (aarch64_get_instr (cpu), 22, 22);
+ uint32_t size = INSTR (31, 31);
+ uint32_t N = INSTR (22, 22);
/* 32 bit operations must have imms[5] = 0
or else we have an UNALLOC. */
- uint32_t imms = uimm (aarch64_get_instr (cpu), 15, 10);
+ uint32_t imms = INSTR (15, 10);
if (size ^ N)
HALT_UNALLOC;
@@ -9560,7 +9557,7 @@ dexExtractImmediate (sim_cpu *cpu)
HALT_UNALLOC;
/* Switch on combined size and op. */
- dispatch = uimm (aarch64_get_instr (cpu), 31, 29);
+ dispatch = INSTR (31, 29);
if (dispatch == 0)
do_EXTR_32 (cpu);
@@ -9627,10 +9624,10 @@ dexLoadUnscaledImmediate (sim_cpu *cpu)
instr[23,22] = opc
instr[20,12] = simm9
instr[9,5] = rn may be SP. */
- /* unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); */
- uint32_t V = uimm (aarch64_get_instr (cpu), 26, 26);
- uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 2)
- | uimm (aarch64_get_instr (cpu), 23, 22));
+ /* unsigned rt = INSTR (4, 0); */
+ uint32_t V = INSTR (26, 26);
+ uint32_t dispatch = ( (INSTR (31, 30) << 2)
+ | INSTR (23, 22));
int32_t imm = simm32 (aarch64_get_instr (cpu), 20, 12);
if (!V)
@@ -9704,8 +9701,8 @@ dexLoadUnscaledImmediate (sim_cpu *cpu)
static void
ldrsb32_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int rt = INSTR (4, 0);
/* The target register may not be SP but the source may be
there is no scaling required for a byte load. */
@@ -9719,9 +9716,9 @@ ldrsb32_abs (sim_cpu *cpu, uint32_t offset)
static void
ldrsb32_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned int rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rm = INSTR (20, 16);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int rt = INSTR (4, 0);
/* rn may reference SP, rm and rt must reference ZR. */
@@ -9741,8 +9738,8 @@ static void
ldrsb32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
uint64_t address;
- unsigned int rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned int rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned int rn = INSTR (9, 5);
+ unsigned int rt = INSTR (4, 0);
if (rn == rt && wb != NoWriteBack)
HALT_UNALLOC;
@@ -9766,8 +9763,8 @@ ldrsb32_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
fstrb_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned st = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
aarch64_set_mem_u8 (cpu,
aarch64_get_reg_u64 (cpu, rn, SP_OK) + offset,
@@ -9779,9 +9776,9 @@ fstrb_abs (sim_cpu *cpu, uint32_t offset)
static void
fstrb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -9796,8 +9793,8 @@ fstrb_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
fstrh_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned st = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
aarch64_set_mem_u16
(cpu,
@@ -9810,9 +9807,9 @@ fstrh_abs (sim_cpu *cpu, uint32_t offset)
static void
fstrh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -9827,8 +9824,8 @@ fstrh_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
fstrs_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned st = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
aarch64_set_mem_u32
(cpu,
@@ -9840,8 +9837,8 @@ fstrs_abs (sim_cpu *cpu, uint32_t offset)
static void
fstrs_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -9862,9 +9859,9 @@ fstrs_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
fstrs_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -9879,8 +9876,8 @@ fstrs_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
fstrd_abs (sim_cpu *cpu, uint32_t offset)
{
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned st = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
aarch64_set_mem_u64
(cpu,
@@ -9892,8 +9889,8 @@ fstrd_abs (sim_cpu *cpu, uint32_t offset)
static void
fstrd_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
@@ -9914,9 +9911,9 @@ fstrd_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
fstrd_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -9932,8 +9929,8 @@ static void
fstrq_abs (sim_cpu *cpu, uint32_t offset)
{
FRegister a;
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned st = INSTR (4, 0);
+ unsigned rn = INSTR (9, 5);
uint64_t addr;
aarch64_get_FP_long_double (cpu, st, & a);
@@ -9947,8 +9944,8 @@ static void
fstrq_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
FRegister a;
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
if (wb != Post)
@@ -9969,9 +9966,9 @@ fstrq_wb (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
fstrq_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned st = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned st = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rn, SP_OK);
int64_t extended = extend (aarch64_get_reg_u32 (cpu, rm, NO_SP),
@@ -9987,21 +9984,22 @@ fstrq_scale_ext (sim_cpu *cpu, Scaling scaling, Extension extension)
static void
dexLoadImmediatePrePost (sim_cpu *cpu)
{
- /* instr[29,24] == 111_00
- instr[21] == 0
- instr[11,10] == 00
- instr[31,30] = size
- instr[26] = V
+ /* instr[31,30] = size
+ instr[29,27] = 111
+ instr[26] = V
+ instr[25,24] = 00
instr[23,22] = opc
+ instr[21] = 0
instr[20,12] = simm9
- instr[11] = wb : 0 ==> Post, 1 ==> Pre
- instr[9,5] = rn may be SP. */
- /* unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0); */
- uint32_t V = uimm (aarch64_get_instr (cpu), 26, 26);
- uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 2)
- | uimm (aarch64_get_instr (cpu), 23, 22));
- int32_t imm = simm32 (aarch64_get_instr (cpu), 20, 12);
- WriteBack wb = writeback (aarch64_get_instr (cpu), 11);
+ instr[11] = wb : 0 ==> Post, 1 ==> Pre
+ instr[10] = 0
+ instr[9,5] = Rn may be SP.
+ instr[4,0] = Rt */
+
+ uint32_t V = INSTR (26, 26);
+ uint32_t dispatch = ((INSTR (31, 30) << 2) | INSTR (23, 22));
+ int32_t imm = simm32 (aarch64_get_instr (cpu), 20, 12);
+ WriteBack wb = INSTR (11, 11);
if (!V)
{
@@ -10075,11 +10073,10 @@ dexLoadRegisterOffset (sim_cpu *cpu)
instr[9,5] = rn
instr[4,0] = rt. */
- uint32_t V = uimm (aarch64_get_instr (cpu), 26,26);
- uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 2)
- | uimm (aarch64_get_instr (cpu), 23, 22));
- Scaling scale = scaling (aarch64_get_instr (cpu), 12);
- Extension extensionType = extension (aarch64_get_instr (cpu), 13);
+ uint32_t V = INSTR (26, 26);
+ uint32_t dispatch = ((INSTR (31, 30) << 2) | INSTR (23, 22));
+ Scaling scale = INSTR (12, 12);
+ Extension extensionType = INSTR (15, 13);
/* Check for illegal extension types. */
if (uimm (extensionType, 1, 1) == 0)
@@ -10153,11 +10150,11 @@ dexLoadUnsignedImmediate (sim_cpu *cpu)
instr[21,10] = uimm12 : unsigned immediate offset
instr[9,5] = rn may be SP.
instr[4,0] = rt. */
-
- uint32_t V = uimm (aarch64_get_instr (cpu), 26,26);
- uint32_t dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 30) << 2)
- | uimm (aarch64_get_instr (cpu), 23, 22));
- uint32_t imm = uimm (aarch64_get_instr (cpu), 21, 10);
+
+ uint32_t V = INSTR (26,26);
+ uint32_t dispatch = ( (INSTR (31, 30) << 2)
+ | INSTR (23, 22));
+ uint32_t imm = INSTR (21, 10);
if (!V)
{
@@ -10226,7 +10223,7 @@ dexLoadExclusive (sim_cpu *cpu)
instr[9,5] = Rn
instr[4.0] = Rt. */
- switch (uimm (aarch64_get_instr (cpu), 22, 21))
+ switch (INSTR (22, 21))
{
case 2: ldxr (cpu); return;
case 0: stxr (cpu); return;
@@ -10242,14 +10239,14 @@ dexLoadOther (sim_cpu *cpu)
/* instr[29,25] = 111_0
instr[24] == 0 ==> dispatch, 1 ==> ldst reg unsigned immediate
instr[21:11,10] is the secondary dispatch. */
- if (uimm (aarch64_get_instr (cpu), 24, 24))
+ if (INSTR (24, 24))
{
dexLoadUnsignedImmediate (cpu);
return;
}
- dispatch = ( (uimm (aarch64_get_instr (cpu), 21, 21) << 2)
- | uimm (aarch64_get_instr (cpu), 11, 10));
+ dispatch = ( (INSTR (21, 21) << 2)
+ | INSTR (11, 10));
switch (dispatch)
{
case 0: dexLoadUnscaledImmediate (cpu); return;
@@ -10269,9 +10266,9 @@ dexLoadOther (sim_cpu *cpu)
static void
store_pair_u32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
if ((rn == rd || rm == rd) && wb != NoWriteBack)
@@ -10297,9 +10294,9 @@ store_pair_u32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
store_pair_u64 (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
if ((rn == rd || rm == rd) && wb != NoWriteBack)
@@ -10325,9 +10322,9 @@ store_pair_u64 (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
load_pair_u32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
/* treat this as unalloc to make sure we don't do it. */
@@ -10352,9 +10349,9 @@ load_pair_u32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
load_pair_s32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
/* Treat this as unalloc to make sure we don't do it. */
@@ -10379,9 +10376,9 @@ load_pair_s32 (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
load_pair_u64 (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
/* Treat this as unalloc to make sure we don't do it. */
@@ -10416,8 +10413,8 @@ dex_load_store_pair_gr (sim_cpu *cpu)
instr[ 9, 5] = Rd
instr[ 4, 0] = Rm. */
- uint32_t dispatch = ((uimm (aarch64_get_instr (cpu), 31, 30) << 3)
- | uimm (aarch64_get_instr (cpu), 24, 22));
+ uint32_t dispatch = ((INSTR (31, 30) << 3)
+ | INSTR (24, 22));
int32_t offset = simm32 (aarch64_get_instr (cpu), 21, 15);
switch (dispatch)
@@ -10448,9 +10445,9 @@ dex_load_store_pair_gr (sim_cpu *cpu)
static void
store_pair_float (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
offset <<= 2;
@@ -10471,9 +10468,9 @@ store_pair_float (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
store_pair_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
offset <<= 3;
@@ -10495,9 +10492,9 @@ static void
store_pair_long_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
FRegister a;
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
offset <<= 4;
@@ -10520,9 +10517,9 @@ store_pair_long_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
load_pair_float (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
if (rm == rn)
@@ -10546,9 +10543,9 @@ load_pair_float (sim_cpu *cpu, int32_t offset, WriteBack wb)
static void
load_pair_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
if (rm == rn)
@@ -10573,9 +10570,9 @@ static void
load_pair_long_double (sim_cpu *cpu, int32_t offset, WriteBack wb)
{
FRegister a;
- unsigned rn = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rd = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rm = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (14, 10);
+ unsigned rd = INSTR (9, 5);
+ unsigned rm = INSTR (4, 0);
uint64_t address = aarch64_get_reg_u64 (cpu, rd, SP_OK);
if (rm == rn)
@@ -10610,8 +10607,8 @@ dex_load_store_pair_fp (sim_cpu *cpu)
instr[ 9, 5] = Rd
instr[ 4, 0] = Rm */
- uint32_t dispatch = ((uimm (aarch64_get_instr (cpu), 31, 30) << 3)
- | uimm (aarch64_get_instr (cpu), 24, 22));
+ uint32_t dispatch = ((INSTR (31, 30) << 3)
+ | INSTR (24, 22));
int32_t offset = simm32 (aarch64_get_instr (cpu), 21, 15);
switch (dispatch)
@@ -10652,9 +10649,9 @@ vec_reg (unsigned v, unsigned o)
static void
vec_load (sim_cpu *cpu, uint64_t address, unsigned N)
{
- int all = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned size = uimm (aarch64_get_instr (cpu), 11, 10);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int all = INSTR (30, 30);
+ unsigned size = INSTR (11, 10);
+ unsigned vd = INSTR (4, 0);
unsigned i;
switch (size)
@@ -10730,9 +10727,9 @@ LD2 (sim_cpu *cpu, uint64_t address)
static void
LD1_1 (sim_cpu *cpu, uint64_t address)
{
- int all = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned size = uimm (aarch64_get_instr (cpu), 11, 10);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int all = INSTR (30, 30);
+ unsigned size = INSTR (11, 10);
+ unsigned vd = INSTR (4, 0);
unsigned i;
switch (size)
@@ -10805,9 +10802,9 @@ LD1_4 (sim_cpu *cpu, uint64_t address)
static void
vec_store (sim_cpu *cpu, uint64_t address, unsigned N)
{
- int all = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned size = uimm (aarch64_get_instr (cpu), 11, 10);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int all = INSTR (30, 30);
+ unsigned size = INSTR (11, 10);
+ unsigned vd = INSTR (4, 0);
unsigned i;
switch (size)
@@ -10891,9 +10888,9 @@ ST2 (sim_cpu *cpu, uint64_t address)
static void
ST1_1 (sim_cpu *cpu, uint64_t address)
{
- int all = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned size = uimm (aarch64_get_instr (cpu), 11, 10);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
+ int all = INSTR (30, 30);
+ unsigned size = INSTR (11, 10);
+ unsigned vd = INSTR (4, 0);
unsigned i;
switch (size)
@@ -10973,9 +10970,9 @@ do_vec_LDnR (sim_cpu *cpu, uint64_t address)
instr[9,5] = address
instr[4,0] = Vd */
- unsigned full = uimm (aarch64_get_instr (cpu), 30, 30);
- unsigned vd = uimm (aarch64_get_instr (cpu), 4, 0);
- unsigned size = uimm (aarch64_get_instr (cpu), 11, 10);
+ unsigned full = INSTR (30, 30);
+ unsigned vd = INSTR (4, 0);
+ unsigned size = INSTR (11, 10);
int i;
NYI_assert (29, 24, 0x0D);
@@ -10983,8 +10980,8 @@ do_vec_LDnR (sim_cpu *cpu, uint64_t address)
NYI_assert (15, 14, 3);
NYI_assert (12, 12, 0);
- switch ((uimm (aarch64_get_instr (cpu), 13, 13) << 1)
- | uimm (aarch64_get_instr (cpu), 21, 21))
+ switch ((INSTR (13, 13) << 1)
+ | INSTR (21, 21))
{
case 0: /* LD1R. */
switch (size)
@@ -11277,22 +11274,22 @@ do_vec_load_store (sim_cpu *cpu)
uint64_t address;
int type;
- if (uimm (aarch64_get_instr (cpu), 31, 31) != 0
- || uimm (aarch64_get_instr (cpu), 29, 25) != 0x06)
+ if (INSTR (31, 31) != 0
+ || INSTR (29, 25) != 0x06)
HALT_NYI;
- type = uimm (aarch64_get_instr (cpu), 15, 12);
- if (type != 0xE && type != 0xE && uimm (aarch64_get_instr (cpu), 21, 21) != 0)
+ type = INSTR (15, 12);
+ if (type != 0xE && type != 0xE && INSTR (21, 21) != 0)
HALT_NYI;
- post = uimm (aarch64_get_instr (cpu), 23, 23);
- load = uimm (aarch64_get_instr (cpu), 22, 22);
- vn = uimm (aarch64_get_instr (cpu), 9, 5);
+ post = INSTR (23, 23);
+ load = INSTR (22, 22);
+ vn = INSTR (9, 5);
address = aarch64_get_reg_u64 (cpu, vn, SP_OK);
if (post)
{
- unsigned vm = uimm (aarch64_get_instr (cpu), 20, 16);
+ unsigned vm = INSTR (20, 16);
if (vm == R31)
{
@@ -11305,20 +11302,20 @@ do_vec_load_store (sim_cpu *cpu)
case 8: sizeof_operation = 16; break;
case 0xC:
- sizeof_operation = uimm (aarch64_get_instr (cpu), 21, 21) ? 2 : 1;
- sizeof_operation <<= uimm (aarch64_get_instr (cpu), 11, 10);
+ sizeof_operation = INSTR (21, 21) ? 2 : 1;
+ sizeof_operation <<= INSTR (11, 10);
break;
case 0xE:
- sizeof_operation = uimm (aarch64_get_instr (cpu), 21, 21) ? 8 : 4;
- sizeof_operation <<= uimm (aarch64_get_instr (cpu), 11, 10);
+ sizeof_operation = INSTR (21, 21) ? 8 : 4;
+ sizeof_operation <<= INSTR (11, 10);
break;
case 7:
/* One register, immediate offset variant. */
sizeof_operation = 8;
break;
-
+
case 10:
/* Two registers, immediate offset variant. */
sizeof_operation = 16;
@@ -11338,7 +11335,7 @@ do_vec_load_store (sim_cpu *cpu)
HALT_UNALLOC;
}
- if (uimm (aarch64_get_instr (cpu), 30, 30))
+ if (INSTR (30, 30))
sizeof_operation *= 2;
aarch64_set_reg_u64 (cpu, vn, SP_OK, address + sizeof_operation);
@@ -11429,36 +11426,27 @@ dexLdSt (sim_cpu *cpu)
static void
dexLogicalShiftedRegister (sim_cpu *cpu)
{
- /* assert instr[28:24] = 01010
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[30,29:21] = op,N : 000 ==> AND, 001 ==> BIC,
- 010 ==> ORR, 011 ==> ORN
- 100 ==> EOR, 101 ==> EON,
- 110 ==> ANDS, 111 ==> BICS
+ /* instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
+ instr[30,29] = op
+ instr[28:24] = 01010
instr[23,22] = shift : 0 ==> LSL, 1 ==> LSR, 2 ==> ASR, 3 ==> ROR
+ instr[21] = N
+ instr[20,16] = Rm
instr[15,10] = count : must be 0xxxxx for 32 bit
- instr[9,5] = Rn
- instr[4,0] = Rd */
-
- /* unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16); */
- uint32_t dispatch;
- Shift shiftType;
- uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31);
+ instr[9,5] = Rn
+ instr[4,0] = Rd */
- /* 32 bit operations must have count[5] = 0. */
- /* or else we have an UNALLOC. */
- uint32_t count = uimm (aarch64_get_instr (cpu), 15, 10);
+ uint32_t size = INSTR (31, 31);
+ Shift shiftType = INSTR (23, 22);
+ uint32_t count = INSTR (15, 10);
- if (!size && uimm (count, 5, 5))
+ /* 32 bit operations must have count[5] = 0.
+ or else we have an UNALLOC. */
+ if (size == 0 && uimm (count, 5, 5))
HALT_UNALLOC;
- shiftType = shift (aarch64_get_instr (cpu), 22);
-
- /* dispatch on size:op:N i.e aarch64_get_instr (cpu)[31,29:21]. */
- dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 29) << 1)
- | uimm (aarch64_get_instr (cpu), 21, 21));
-
- switch (dispatch)
+ /* Dispatch on size:op:N. */
+ switch ((INSTR (31, 29) << 1) | INSTR (21, 21))
{
case 0: and32_shift (cpu, shiftType, count); return;
case 1: bic32_shift (cpu, shiftType, count); return;
@@ -11476,7 +11464,6 @@ dexLogicalShiftedRegister (sim_cpu *cpu)
case 13:eon64_shift (cpu, shiftType, count); return;
case 14:ands64_shift (cpu, shiftType, count); return;
case 15:bics64_shift (cpu, shiftType, count); return;
- default: HALT_UNALLOC;
}
}
@@ -11484,9 +11471,9 @@ dexLogicalShiftedRegister (sim_cpu *cpu)
static void
csel32 (sim_cpu *cpu, CondCode cc)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
testConditionCode (cpu, cc)
@@ -11498,9 +11485,9 @@ csel32 (sim_cpu *cpu, CondCode cc)
static void
csel64 (sim_cpu *cpu, CondCode cc)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
testConditionCode (cpu, cc)
@@ -11512,9 +11499,9 @@ csel64 (sim_cpu *cpu, CondCode cc)
static void
csinc32 (sim_cpu *cpu, CondCode cc)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
testConditionCode (cpu, cc)
@@ -11526,9 +11513,9 @@ csinc32 (sim_cpu *cpu, CondCode cc)
static void
csinc64 (sim_cpu *cpu, CondCode cc)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
testConditionCode (cpu, cc)
@@ -11540,9 +11527,9 @@ csinc64 (sim_cpu *cpu, CondCode cc)
static void
csinv32 (sim_cpu *cpu, CondCode cc)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
testConditionCode (cpu, cc)
@@ -11554,9 +11541,9 @@ csinv32 (sim_cpu *cpu, CondCode cc)
static void
csinv64 (sim_cpu *cpu, CondCode cc)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
testConditionCode (cpu, cc)
@@ -11568,9 +11555,9 @@ csinv64 (sim_cpu *cpu, CondCode cc)
static void
csneg32 (sim_cpu *cpu, CondCode cc)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
testConditionCode (cpu, cc)
@@ -11582,9 +11569,9 @@ csneg32 (sim_cpu *cpu, CondCode cc)
static void
csneg64 (sim_cpu *cpu, CondCode cc)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
testConditionCode (cpu, cc)
@@ -11595,8 +11582,8 @@ csneg64 (sim_cpu *cpu, CondCode cc)
static void
dexCondSelect (sim_cpu *cpu)
{
- /* assert instr[28,21] = 11011011
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
+ /* instr[28,21] = 11011011
+ instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
instr[30:11,10] = op : 000 ==> CSEL, 001 ==> CSINC,
100 ==> CSINV, 101 ==> CSNEG,
_1_ ==> UNALLOC
@@ -11604,10 +11591,9 @@ dexCondSelect (sim_cpu *cpu)
instr[15,12] = cond
instr[29] = S : 0 ==> ok, 1 ==> UNALLOC */
- CondCode cc;
- uint32_t dispatch;
- uint32_t S = uimm (aarch64_get_instr (cpu), 29, 29);
- uint32_t op2 = uimm (aarch64_get_instr (cpu), 11, 10);
+ CondCode cc = INSTR (15, 12);
+ uint32_t S = INSTR (29, 29);
+ uint32_t op2 = INSTR (11, 10);
if (S == 1)
HALT_UNALLOC;
@@ -11615,10 +11601,7 @@ dexCondSelect (sim_cpu *cpu)
if (op2 & 0x2)
HALT_UNALLOC;
- cc = condcode (aarch64_get_instr (cpu), 12);
- dispatch = ((uimm (aarch64_get_instr (cpu), 31, 30) << 1) | op2);
-
- switch (dispatch)
+ switch ((INSTR (31, 30) << 1) | op2)
{
case 0: csel32 (cpu, cc); return;
case 1: csinc32 (cpu, cc); return;
@@ -11628,7 +11611,6 @@ dexCondSelect (sim_cpu *cpu)
case 5: csinc64 (cpu, cc); return;
case 6: csinv64 (cpu, cc); return;
case 7: csneg64 (cpu, cc); return;
- default: HALT_UNALLOC;
}
}
@@ -11729,8 +11711,8 @@ leading64 (uint64_t value)
static void
cls32 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* N.B. the result needs to exclude the leading bit. */
aarch64_set_reg_u64
@@ -11741,8 +11723,8 @@ cls32 (sim_cpu *cpu)
static void
cls64 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* N.B. the result needs to exclude the leading bit. */
aarch64_set_reg_u64
@@ -11753,8 +11735,8 @@ cls64 (sim_cpu *cpu)
static void
clz32 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
/* if the sign (top) bit is set then the count is 0. */
@@ -11768,8 +11750,8 @@ clz32 (sim_cpu *cpu)
static void
clz64 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
/* if the sign (top) bit is set then the count is 0. */
@@ -11783,8 +11765,8 @@ clz64 (sim_cpu *cpu)
static void
rbit32 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t result = 0;
int i;
@@ -11802,8 +11784,8 @@ rbit32 (sim_cpu *cpu)
static void
rbit64 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t result = 0;
int i;
@@ -11821,8 +11803,8 @@ rbit64 (sim_cpu *cpu)
static void
rev32 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t result = 0;
int i;
@@ -11840,8 +11822,8 @@ rev32 (sim_cpu *cpu)
static void
rev64 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t result = 0;
int i;
@@ -11860,8 +11842,8 @@ rev64 (sim_cpu *cpu)
static void
revh32 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint32_t value = aarch64_get_reg_u32 (cpu, rn, NO_SP);
uint32_t result = 0;
int i;
@@ -11880,8 +11862,8 @@ revh32 (sim_cpu *cpu)
static void
revh64 (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
uint64_t value = aarch64_get_reg_u64 (cpu, rn, NO_SP);
uint64_t result = 0;
int i;
@@ -11898,22 +11880,22 @@ revh64 (sim_cpu *cpu)
static void
dexDataProc1Source (sim_cpu *cpu)
{
- /* assert instr[30] == 1
- aarch64_get_instr (cpu)[28,21] == 111010110
- instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
- instr[29] = S : 0 ==> ok, 1 ==> UNALLOC
+ /* instr[30] = 1
+ instr[28,21] = 111010110
+ instr[31] = size : 0 ==> 32 bit, 1 ==> 64 bit
+ instr[29] = S : 0 ==> ok, 1 ==> UNALLOC
instr[20,16] = opcode2 : 00000 ==> ok, ow ==> UNALLOC
instr[15,10] = opcode : 000000 ==> RBIT, 000001 ==> REV16,
000010 ==> REV, 000011 ==> UNALLOC
000100 ==> CLZ, 000101 ==> CLS
ow ==> UNALLOC
- instr[9,5] = rn : may not be SP
- instr[4,0] = rd : may not be SP. */
+ instr[9,5] = rn : may not be SP
+ instr[4,0] = rd : may not be SP. */
- uint32_t S = uimm (aarch64_get_instr (cpu), 29, 29);
- uint32_t opcode2 = uimm (aarch64_get_instr (cpu), 20, 16);
- uint32_t opcode = uimm (aarch64_get_instr (cpu), 15, 10);
- uint32_t dispatch = ((uimm (aarch64_get_instr (cpu), 31, 31) << 3) | opcode);
+ uint32_t S = INSTR (29, 29);
+ uint32_t opcode2 = INSTR (20, 16);
+ uint32_t opcode = INSTR (15, 10);
+ uint32_t dispatch = ((INSTR (31, 31) << 3) | opcode);
if (S == 1)
HALT_UNALLOC;
@@ -11953,9 +11935,9 @@ dexDataProc1Source (sim_cpu *cpu)
static void
asrv32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP,
@@ -11967,9 +11949,9 @@ asrv32 (sim_cpu *cpu)
static void
asrv64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP,
@@ -11981,9 +11963,9 @@ asrv64 (sim_cpu *cpu)
static void
lslv32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP,
@@ -11995,9 +11977,9 @@ lslv32 (sim_cpu *cpu)
static void
lslv64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP,
@@ -12009,9 +11991,9 @@ lslv64 (sim_cpu *cpu)
static void
lsrv32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP,
@@ -12023,9 +12005,9 @@ lsrv32 (sim_cpu *cpu)
static void
lsrv64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP,
@@ -12037,9 +12019,9 @@ lsrv64 (sim_cpu *cpu)
static void
rorv32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP,
@@ -12051,9 +12033,9 @@ rorv32 (sim_cpu *cpu)
static void
rorv64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64
(cpu, rd, NO_SP,
@@ -12068,9 +12050,9 @@ rorv64 (sim_cpu *cpu)
static void
cpuiv32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* N.B. the pseudo-code does the divide using 64 bit data. */
/* TODO : check that this rounds towards zero as required. */
int64_t dividend = aarch64_get_reg_s32 (cpu, rn, NO_SP);
@@ -12084,9 +12066,9 @@ cpuiv32 (sim_cpu *cpu)
static void
cpuiv64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* TODO : check that this rounds towards zero as required. */
int64_t divisor = aarch64_get_reg_s64 (cpu, rm, NO_SP);
@@ -12100,9 +12082,9 @@ cpuiv64 (sim_cpu *cpu)
static void
udiv32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* N.B. the pseudo-code does the divide using 64 bit data. */
uint64_t dividend = aarch64_get_reg_u32 (cpu, rn, NO_SP);
@@ -12116,9 +12098,9 @@ udiv32 (sim_cpu *cpu)
static void
udiv64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* TODO : check that this rounds towards zero as required. */
uint64_t divisor = aarch64_get_reg_u64 (cpu, rm, NO_SP);
@@ -12141,8 +12123,8 @@ dexDataProc2Source (sim_cpu *cpu)
ow ==> UNALLOC. */
uint32_t dispatch;
- uint32_t S = uimm (aarch64_get_instr (cpu), 29, 29);
- uint32_t opcode = uimm (aarch64_get_instr (cpu), 15, 10);
+ uint32_t S = INSTR (29, 29);
+ uint32_t opcode = INSTR (15, 10);
if (S == 1)
HALT_UNALLOC;
@@ -12150,7 +12132,7 @@ dexDataProc2Source (sim_cpu *cpu)
if (opcode & 0x34)
HALT_UNALLOC;
- dispatch = ( (uimm (aarch64_get_instr (cpu), 31, 31) << 3)
+ dispatch = ( (INSTR (31, 31) << 3)
| (uimm (opcode, 3, 3) << 2)
| uimm (opcode, 1, 0));
switch (dispatch)
@@ -12178,10 +12160,10 @@ dexDataProc2Source (sim_cpu *cpu)
static void
madd32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned ra = INSTR (14, 10);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, ra, NO_SP)
@@ -12193,10 +12175,10 @@ madd32 (sim_cpu *cpu)
static void
madd64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned ra = INSTR (14, 10);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, ra, NO_SP)
@@ -12208,10 +12190,10 @@ madd64 (sim_cpu *cpu)
static void
msub32 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned ra = INSTR (14, 10);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u32 (cpu, ra, NO_SP)
@@ -12223,10 +12205,10 @@ msub32 (sim_cpu *cpu)
static void
msub64 (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned ra = INSTR (14, 10);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rd, NO_SP,
aarch64_get_reg_u64 (cpu, ra, NO_SP)
@@ -12238,10 +12220,10 @@ msub64 (sim_cpu *cpu)
static void
smaddl (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned ra = INSTR (14, 10);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* N.B. we need to multiply the signed 32 bit values in rn, rm to
obtain a 64 bit product. */
@@ -12256,10 +12238,10 @@ smaddl (sim_cpu *cpu)
static void
smsubl (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned ra = INSTR (14, 10);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* N.B. we need to multiply the signed 32 bit values in rn, rm to
obtain a 64 bit product. */
@@ -12351,16 +12333,16 @@ static void
smulh (sim_cpu *cpu)
{
uint64_t uresult;
- int64_t result;
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- GReg ra = greg (aarch64_get_instr (cpu), 10);
- int64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
- int64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP);
+ int64_t result;
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+ GReg ra = INSTR (14, 10);
+ int64_t value1 = aarch64_get_reg_u64 (cpu, rn, NO_SP);
+ int64_t value2 = aarch64_get_reg_u64 (cpu, rm, NO_SP);
uint64_t uvalue1;
uint64_t uvalue2;
- int64_t signum = 1;
+ int64_t signum = 1;
if (ra != R31)
HALT_UNALLOC;
@@ -12399,10 +12381,10 @@ smulh (sim_cpu *cpu)
static void
umaddl (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned ra = INSTR (14, 10);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* N.B. we need to multiply the signed 32 bit values in rn, rm to
obtain a 64 bit product. */
@@ -12417,10 +12399,10 @@ umaddl (sim_cpu *cpu)
static void
umsubl (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned ra = uimm (aarch64_get_instr (cpu), 14, 10);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rm = INSTR (20, 16);
+ unsigned ra = INSTR (14, 10);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
/* N.B. we need to multiply the signed 32 bit values in rn, rm to
obtain a 64 bit product. */
@@ -12436,10 +12418,10 @@ umsubl (sim_cpu *cpu)
static void
umulh (sim_cpu *cpu)
{
- unsigned rm = uimm (aarch64_get_instr (cpu), 20, 16);
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
- unsigned rd = uimm (aarch64_get_instr (cpu), 4, 0);
- GReg ra = greg (aarch64_get_instr (cpu), 10);
+ unsigned rm = INSTR (20, 16);
+ unsigned rn = INSTR (9, 5);
+ unsigned rd = INSTR (4, 0);
+ GReg ra = INSTR (14, 10);
if (ra != R31)
HALT_UNALLOC;
@@ -12465,10 +12447,10 @@ dexDataProc3Source (sim_cpu *cpu)
ow ==> UNALLOC. */
uint32_t dispatch;
- uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31);
- uint32_t op54 = uimm (aarch64_get_instr (cpu), 30, 29);
- uint32_t op31 = uimm (aarch64_get_instr (cpu), 23, 21);
- uint32_t o0 = uimm (aarch64_get_instr (cpu), 15, 15);
+ uint32_t size = INSTR (31, 31);
+ uint32_t op54 = INSTR (30, 29);
+ uint32_t op31 = INSTR (23, 21);
+ uint32_t o0 = INSTR (15, 15);
if (op54 != 0)
HALT_UNALLOC;
@@ -12622,7 +12604,7 @@ bl (sim_cpu *cpu, int32_t offset)
static void
br (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP));
}
@@ -12630,7 +12612,7 @@ br (sim_cpu *cpu)
static void
blr (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
/* The pseudo code in the spec says we update LR before fetching.
the value from the rn. */
@@ -12658,7 +12640,7 @@ blr (sim_cpu *cpu)
static void
ret (sim_cpu *cpu)
{
- unsigned rn = uimm (aarch64_get_instr (cpu), 9, 5);
+ unsigned rn = INSTR (9, 5);
aarch64_set_next_PC (cpu, aarch64_get_reg_u64 (cpu, rn, NO_SP));
if (TRACE_BRANCH_P (cpu))
@@ -12706,7 +12688,7 @@ dexBranchImmediate (sim_cpu *cpu)
instr[31] ==> 0 == B, 1 == BL
instr[25,0] == imm26 branch offset counted in words. */
- uint32_t top = uimm (aarch64_get_instr (cpu), 31, 31);
+ uint32_t top = INSTR (31, 31);
/* We have a 26 byte signed word offset which we need to pass to the
execute routine as a signed byte offset. */
int32_t offset = simm32 (aarch64_get_instr (cpu), 25, 0) << 2;
@@ -12740,7 +12722,7 @@ bcc (sim_cpu *cpu, int32_t offset, CondCode cc)
static void
cbnz32 (sim_cpu *cpu, int32_t offset)
{
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rt = INSTR (4, 0);
if (aarch64_get_reg_u32 (cpu, rt, NO_SP) != 0)
aarch64_set_next_PC_by_offset (cpu, offset);
@@ -12750,7 +12732,7 @@ cbnz32 (sim_cpu *cpu, int32_t offset)
static void
cbnz (sim_cpu *cpu, int32_t offset)
{
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rt = INSTR (4, 0);
if (aarch64_get_reg_u64 (cpu, rt, NO_SP) != 0)
aarch64_set_next_PC_by_offset (cpu, offset);
@@ -12760,7 +12742,7 @@ cbnz (sim_cpu *cpu, int32_t offset)
static void
cbz32 (sim_cpu *cpu, int32_t offset)
{
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rt = INSTR (4, 0);
if (aarch64_get_reg_u32 (cpu, rt, NO_SP) == 0)
aarch64_set_next_PC_by_offset (cpu, offset);
@@ -12770,7 +12752,7 @@ cbz32 (sim_cpu *cpu, int32_t offset)
static void
cbz (sim_cpu *cpu, int32_t offset)
{
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rt = INSTR (4, 0);
if (aarch64_get_reg_u64 (cpu, rt, NO_SP) == 0)
aarch64_set_next_PC_by_offset (cpu, offset);
@@ -12780,7 +12762,7 @@ cbz (sim_cpu *cpu, int32_t offset)
static void
tbnz (sim_cpu *cpu, uint32_t pos, int32_t offset)
{
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rt = INSTR (4, 0);
if (aarch64_get_reg_u64 (cpu, rt, NO_SP) & (1 << pos))
aarch64_set_next_PC_by_offset (cpu, offset);
@@ -12790,7 +12772,7 @@ tbnz (sim_cpu *cpu, uint32_t pos, int32_t offset)
static void
tbz (sim_cpu *cpu, uint32_t pos, int32_t offset)
{
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rt = INSTR (4, 0);
if (!(aarch64_get_reg_u64 (cpu, rt, NO_SP) & (1 << pos)))
aarch64_set_next_PC_by_offset (cpu, offset);
@@ -12805,8 +12787,8 @@ dexCompareBranchImmediate (sim_cpu *cpu)
instr[23,5] = simm19 branch offset counted in words
instr[4,0] = rt */
- uint32_t size = uimm (aarch64_get_instr (cpu), 31, 31);
- uint32_t op = uimm (aarch64_get_instr (cpu), 24, 24);
+ uint32_t size = INSTR (31, 31);
+ uint32_t op = INSTR (24, 24);
int32_t offset = simm32 (aarch64_get_instr (cpu), 23, 5) << 2;
if (size == 0)
@@ -12835,13 +12817,13 @@ dexTestBranchImmediate (sim_cpu *cpu)
instr[18,5] = simm14 : signed offset counted in words
instr[4,0] = uimm5 */
- uint32_t pos = ((uimm (aarch64_get_instr (cpu), 31, 31) << 4)
- | uimm (aarch64_get_instr (cpu), 23,19));
+ uint32_t pos = ((INSTR (31, 31) << 4)
+ | INSTR (23,19));
int32_t offset = simm32 (aarch64_get_instr (cpu), 18, 5) << 2;
NYI_assert (30, 25, 0x1b);
- if (uimm (aarch64_get_instr (cpu), 24, 24) == 0)
+ if (INSTR (24, 24) == 0)
tbz (cpu, pos, offset);
else
tbnz (cpu, pos, offset);
@@ -12857,9 +12839,7 @@ dexCondBranchImmediate (sim_cpu *cpu)
instr[3,0] = cond */
int32_t offset;
- CondCode cc;
- uint32_t op = ((uimm (aarch64_get_instr (cpu), 24, 24) << 1)
- | uimm (aarch64_get_instr (cpu), 4, 4));
+ uint32_t op = ((INSTR (24, 24) << 1) | INSTR (4, 4));
NYI_assert (31, 25, 0x2a);
@@ -12867,9 +12847,8 @@ dexCondBranchImmediate (sim_cpu *cpu)
HALT_UNALLOC;
offset = simm32 (aarch64_get_instr (cpu), 23, 5) << 2;
- cc = condcode (aarch64_get_instr (cpu), 0);
- bcc (cpu, offset, cc);
+ bcc (cpu, offset, INSTR (3, 0));
}
static void
@@ -12881,10 +12860,10 @@ dexBranchRegister (sim_cpu *cpu)
instr[15,10] = op3 : must be 000000
instr[4,0] = op2 : must be 11111. */
- uint32_t op = uimm (aarch64_get_instr (cpu), 24, 21);
- uint32_t op2 = uimm (aarch64_get_instr (cpu), 20, 16);
- uint32_t op3 = uimm (aarch64_get_instr (cpu), 15, 10);
- uint32_t op4 = uimm (aarch64_get_instr (cpu), 4, 0);
+ uint32_t op = INSTR (24, 21);
+ uint32_t op2 = INSTR (20, 16);
+ uint32_t op3 = INSTR (15, 10);
+ uint32_t op4 = INSTR (4, 0);
NYI_assert (31, 25, 0x6b);
@@ -12902,9 +12881,9 @@ dexBranchRegister (sim_cpu *cpu)
else
{
- /* ERET and DRPS accept 0b11111 for rn = aarch64_get_instr (cpu)[4,0]. */
+ /* ERET and DRPS accept 0b11111 for rn = instr [4,0]. */
/* anything else is unallocated. */
- uint32_t rn = greg (aarch64_get_instr (cpu), 0);
+ uint32_t rn = INSTR (4, 0);
if (rn != 0x1f)
HALT_UNALLOC;
@@ -13126,9 +13105,9 @@ dexExcpnGen (sim_cpu *cpu)
instr[4,2] = opc2 000 ==> OK, ow ==> UNALLOC
instr[1,0] = LL : discriminates opc */
- uint32_t opc = uimm (aarch64_get_instr (cpu), 23, 21);
- uint32_t imm16 = uimm (aarch64_get_instr (cpu), 20, 5);
- uint32_t opc2 = uimm (aarch64_get_instr (cpu), 4, 2);
+ uint32_t opc = INSTR (23, 21);
+ uint32_t imm16 = INSTR (20, 5);
+ uint32_t opc2 = INSTR (4, 2);
uint32_t LL;
NYI_assert (31, 24, 0xd4);
@@ -13136,7 +13115,7 @@ dexExcpnGen (sim_cpu *cpu)
if (opc2 != 0)
HALT_UNALLOC;
- LL = uimm (aarch64_get_instr (cpu), 1, 0);
+ LL = INSTR (1, 0);
/* We only implement HLT and BRK for now. */
if (opc == 1 && LL == 0)
@@ -13206,7 +13185,7 @@ system_set (sim_cpu *cpu, unsigned op0, unsigned op1, unsigned crn,
else
HALT_NYI;
}
-
+
static void
do_mrs (sim_cpu *cpu)
{
@@ -13217,12 +13196,12 @@ do_mrs (sim_cpu *cpu)
instr[11,8] = CRm
instr[7,5] = op2
instr[4,0] = Rt */
- unsigned sys_op0 = uimm (aarch64_get_instr (cpu), 19, 19) + 2;
- unsigned sys_op1 = uimm (aarch64_get_instr (cpu), 18, 16);
- unsigned sys_crn = uimm (aarch64_get_instr (cpu), 15, 12);
- unsigned sys_crm = uimm (aarch64_get_instr (cpu), 11, 8);
- unsigned sys_op2 = uimm (aarch64_get_instr (cpu), 7, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sys_op0 = INSTR (19, 19) + 2;
+ unsigned sys_op1 = INSTR (18, 16);
+ unsigned sys_crn = INSTR (15, 12);
+ unsigned sys_crm = INSTR (11, 8);
+ unsigned sys_op2 = INSTR (7, 5);
+ unsigned rt = INSTR (4, 0);
aarch64_set_reg_u64 (cpu, rt, NO_SP,
system_get (cpu, sys_op0, sys_op1, sys_crn, sys_crm, sys_op2));
@@ -13238,9 +13217,9 @@ do_MSR_immediate (sim_cpu *cpu)
instr[7,5] = op2
instr[4,0] = 1 1111 */
- unsigned op1 = uimm (aarch64_get_instr (cpu), 18, 16);
- /*unsigned crm = uimm (aarch64_get_instr (cpu), 11, 8);*/
- unsigned op2 = uimm (aarch64_get_instr (cpu), 7, 5);
+ unsigned op1 = INSTR (18, 16);
+ /*unsigned crm = INSTR (11, 8);*/
+ unsigned op2 = INSTR (7, 5);
NYI_assert (31, 19, 0x1AA0);
NYI_assert (15, 12, 0x4);
@@ -13277,21 +13256,21 @@ do_MSR_reg (sim_cpu *cpu)
instr[7,5] = op2
instr[4,0] = Rt */
- unsigned sys_op0 = uimm (aarch64_get_instr (cpu), 19, 19) + 2;
- unsigned sys_op1 = uimm (aarch64_get_instr (cpu), 18, 16);
- unsigned sys_crn = uimm (aarch64_get_instr (cpu), 15, 12);
- unsigned sys_crm = uimm (aarch64_get_instr (cpu), 11, 8);
- unsigned sys_op2 = uimm (aarch64_get_instr (cpu), 7, 5);
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned sys_op0 = INSTR (19, 19) + 2;
+ unsigned sys_op1 = INSTR (18, 16);
+ unsigned sys_crn = INSTR (15, 12);
+ unsigned sys_crm = INSTR (11, 8);
+ unsigned sys_op2 = INSTR (7, 5);
+ unsigned rt = INSTR (4, 0);
NYI_assert (31, 20, 0xD51);
-
+
system_set (cpu, sys_op0, sys_op1, sys_crn, sys_crm, sys_op2,
aarch64_get_reg_u64 (cpu, rt, NO_SP));
}
static void
-do_SYS (sim_cpu * cpu)
+do_SYS (sim_cpu *cpu)
{
/* instr[31,19] = 1101 0101 0000 1
instr[18,16] = op1
@@ -13303,7 +13282,7 @@ do_SYS (sim_cpu * cpu)
/* FIXME: For now we just silently accept system ops. */
}
-
+
static void
dexSystem (sim_cpu *cpu)
{
@@ -13333,7 +13312,7 @@ dexSystem (sim_cpu *cpu)
types : 01 ==> Reads, 10 ==> Writes,
11 ==> All, 00 ==> All (domain == FullSystem). */
- unsigned rt = uimm (aarch64_get_instr (cpu), 4, 0);
+ unsigned rt = INSTR (4, 0);
NYI_assert (31, 22, 0x354);
@@ -13344,8 +13323,8 @@ dexSystem (sim_cpu *cpu)
{
/* NOP has CRm != 0000 OR. */
/* (CRm == 0000 AND (op2 == 000 OR op2 > 101)). */
- uint32_t crm = uimm (aarch64_get_instr (cpu), 11, 8);
- uint32_t op2 = uimm (aarch64_get_instr (cpu), 7, 5);
+ uint32_t crm = INSTR (11, 8);
+ uint32_t op2 = INSTR (7, 5);
if (crm != 0 || (op2 == 0 || op2 > 5))
{
@@ -13358,7 +13337,7 @@ dexSystem (sim_cpu *cpu)
case 0x033:
{
- uint32_t op2 = uimm (aarch64_get_instr (cpu), 7, 5);
+ uint32_t op2 = INSTR (7, 5);
switch (op2)
{
@@ -13381,7 +13360,7 @@ dexSystem (sim_cpu *cpu)
return;
default:
- if (uimm (aarch64_get_instr (cpu), 21, 20) == 0x1)
+ if (INSTR (21, 20) == 0x1)
do_MSR_reg (cpu);
else if (INSTR (21, 19) == 0 && INSTR (15, 12) == 0x4)
do_MSR_immediate (cpu);
@@ -13406,7 +13385,7 @@ dexBr (sim_cpu *cpu)
case BR_IMMCMP_001:
/* Compare has bit 25 clear while test has it set. */
- if (!uimm (aarch64_get_instr (cpu), 25, 25))
+ if (!INSTR (25, 25))
dexCompareBranchImmediate (cpu);
else
dexTestBranchImmediate (cpu);
@@ -13415,7 +13394,7 @@ dexBr (sim_cpu *cpu)
case BR_IMMCOND_010:
/* This is a conditional branch if bit 25 is clear otherwise
unallocated. */
- if (!uimm (aarch64_get_instr (cpu), 25, 25))
+ if (!INSTR (25, 25))
dexCondBranchImmediate (cpu);
else
HALT_UNALLOC;
@@ -13430,7 +13409,7 @@ dexBr (sim_cpu *cpu)
case BR_IMMCMP_101:
/* Compare has bit 25 clear while test has it set. */
- if (!uimm (aarch64_get_instr (cpu), 25, 25))
+ if (!INSTR (25, 25))
dexCompareBranchImmediate (cpu);
else
dexTestBranchImmediate (cpu);
@@ -13438,20 +13417,20 @@ dexBr (sim_cpu *cpu)
case BR_REG_110:
/* Unconditional branch reg has bit 25 set. */
- if (uimm (aarch64_get_instr (cpu), 25, 25))
+ if (INSTR (25, 25))
dexBranchRegister (cpu);
/* This includes both Excpn Gen, System and unalloc operations.
We need to decode the Excpn Gen operation BRK so we can plant
debugger entry points.
- Excpn Gen operations have aarch64_get_instr (cpu)[24] = 0.
+ Excpn Gen operations have instr [24] = 0.
we need to decode at least one of the System operations NOP
which is an alias for HINT #0.
- System operations have aarch64_get_instr (cpu)[24,22] = 100. */
- else if (uimm (aarch64_get_instr (cpu), 24, 24) == 0)
+ System operations have instr [24,22] = 100. */
+ else if (INSTR (24, 24) == 0)
dexExcpnGen (cpu);
- else if (uimm (aarch64_get_instr (cpu), 24, 22) == 4)
+ else if (INSTR (24, 22) == 4)
dexSystem (cpu);
else