Diffstat (limited to 'target/arm/cpu.h')
-rw-r--r--  target/arm/cpu.h  967
1 file changed, 166 insertions, 801 deletions
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a8177c6..bf221e6 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -24,16 +24,16 @@
#include "qemu/cpu-float.h"
#include "hw/registerfields.h"
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "exec/page-protection.h"
#include "qapi/qapi-types-common.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
-
-#ifdef TARGET_AARCH64
-#define KVM_HAVE_MCE_INJECTION 1
-#endif
+#include "target/arm/cpu-sysregs.h"
+#include "target/arm/mmuidx.h"
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
@@ -100,12 +100,6 @@
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
-/* ARM-specific extra insn start words:
- * 1: Conditional execution bits
- * 2: Partial exception syndrome for data aborts
- */
-#define TARGET_INSN_START_EXTRA_WORDS 2
-
/* The 2nd extra word holding syndrome info for data aborts does not use
* the upper 6 bits nor the lower 13 bits. We mask and shift it down to
* help the sleb128 encoder do a better job.
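As a sketch of the mask-and-shift described above (hypothetical names and
values inferred from the comment, not this patch's actual macros):

/* Illustrative only: drop the unused upper 6 and lower 13 bits so the
 * packed value is small and sleb128-encodes in fewer bytes. */
#define EX_WORD2_SHIFT 13
#define EX_WORD2_MASK  (((1u << 26) - 1) & ~((1u << 13) - 1))

static inline uint32_t pack_word2(uint32_t syn)
{
    return (syn & EX_WORD2_MASK) >> EX_WORD2_SHIFT;
}

static inline uint32_t unpack_word2(uint32_t w2)
{
    return w2 << EX_WORD2_SHIFT;    /* shift back up when restoring state */
}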
@@ -171,17 +165,12 @@ typedef struct ARMGenericTimer {
* Align the data for use with TCG host vector operations.
*/
-#ifdef TARGET_AARCH64
-# define ARM_MAX_VQ 16
-#else
-# define ARM_MAX_VQ 1
-#endif
+#define ARM_MAX_VQ 16
typedef struct ARMVectorReg {
uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;
-#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all. */
typedef struct ARMPredicateReg {
uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
@@ -191,12 +180,11 @@ typedef struct ARMPredicateReg {
typedef struct ARMPACKey {
uint64_t lo, hi;
} ARMPACKey;
-#endif
/* See the commentary above the TBFLAG field definitions. */
typedef struct CPUARMTBFlags {
uint32_t flags;
- target_ulong flags2;
+ uint64_t flags2;
} CPUARMTBFlags;
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
@@ -220,6 +208,8 @@ typedef struct NVICState NVICState;
* when FPCR.AH == 1 (bfloat16 conversions and multiplies,
* and the reciprocal and square root estimate/step insns);
* for half-precision
+ * ZA: the "streaming sve" fp status.
+ * ZA_F16: likewise for half-precision.
*
* Half-precision operations are governed by a separate
* flush-to-zero control bit in FPSCR:FZ16. We pass a separate
@@ -240,6 +230,12 @@ typedef struct NVICState NVICState;
* they ignore FPCR.RMode. But they don't ignore FPCR.FZ16,
* which means we need an FPST_AH_F16 as well.
*
+ * The "ZA" float_status are for Streaming SVE operations which use
+ * default-NaN and do not generate fp exceptions, which means that they
+ * do not accumulate exception bits back into FPCR.
+ * See e.g. FPAdd vs FPAdd_ZA pseudocode functions, and the setting
+ * of fpcr.DN and fpexc parameters.
+ *
* To avoid having to transfer exception bits around, we simply
* say that the FPSCR cumulative exception flags are the logical
* OR of the flags in the four fp statuses. This relies on the
@@ -253,10 +249,12 @@ typedef enum ARMFPStatusFlavour {
FPST_A64_F16,
FPST_AH,
FPST_AH_F16,
+ FPST_ZA,
+ FPST_ZA_F16,
FPST_STD,
FPST_STD_F16,
} ARMFPStatusFlavour;
-#define FPST_COUNT 8
+#define FPST_COUNT 10
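Each flavour indexes one element of the fp_status[] array declared inside
CPUARMState's vfp block below; a minimal usage sketch (hypothetical helper
name, not part of this patch):

/* Hypothetical: fetch the float_status for one flavour, e.g. FPST_ZA
 * for streaming SVE insns that use default-NaN and no fp exceptions. */
static inline float_status *arm_fpst(CPUARMState *env, ARMFPStatusFlavour kind)
{
    return &env->vfp.fp_status[kind];
}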
typedef struct CPUArchState {
/* Regs for current mode. */
@@ -270,7 +268,7 @@ typedef struct CPUArchState {
uint64_t xregs[32];
uint64_t pc;
/* PSTATE isn't an architectural register for ARMv8. However, it is
- * convenient for us to assemble the underlying state into a 32 bit format
+ * convenient for us to assemble the underlying state into a 64 bit format
* identical to the architectural format used for the SPSR. (This is also
* what the Linux kernel's 'pstate' field in signal handlers and KVM's
* 'pstate' register are.) Of the PSTATE bits:
@@ -282,7 +280,7 @@ typedef struct CPUArchState {
* SM and ZA are kept in env->svcr
* all other bits are stored in their correct places in env->pstate
*/
- uint32_t pstate;
+ uint64_t pstate;
bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */
bool thumb; /* True if CPU is in thumb mode; cpsr[5] */
@@ -340,10 +338,10 @@ typedef struct CPUArchState {
};
uint64_t sctlr_el[4];
};
+ uint64_t sctlr2_el[4]; /* Extension to System control register. */
uint64_t vsctlr; /* Virtualization System control register. */
uint64_t cpacr_el1; /* Architectural feature access control register */
uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
- uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
uint64_t sder; /* Secure debug enable register. */
uint32_t nsacr; /* Non-secure access control register. */
union { /* MMU translation table base 0. */
@@ -368,8 +366,12 @@ typedef struct CPUArchState {
uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */
/* MMU translation table base control. */
uint64_t tcr_el[4];
+ uint64_t tcr2_el[3];
uint64_t vtcr_el2; /* Virtualization Translation Control. */
uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */
+ uint64_t pir_el[4]; /* PIRE0_EL1, PIR_EL1, PIR_EL2, PIR_EL3 */
+ uint64_t pire0_el2;
+ uint64_t s2pir_el2;
uint32_t c2_data; /* MPU data cacheable bits. */
uint32_t c2_insn; /* MPU instruction cacheable bits. */
union { /* MMU domain access control register
@@ -514,7 +516,6 @@ typedef struct CPUArchState {
uint64_t cntvoff_el2; /* Counter Virtual Offset register */
uint64_t cntpoff_el2; /* Counter Physical Offset register */
ARMGenericTimer c14_timer[NUM_GTIMERS];
- uint32_t c15_cpar; /* XScale Coprocessor Access Register */
uint32_t c15_ticonfig; /* TI925T configuration byte. */
uint32_t c15_i_max; /* Maximum D-cache dirty line index. */
uint32_t c15_i_min; /* Minimum D-cache dirty line index. */
@@ -579,6 +580,18 @@ typedef struct CPUArchState {
/* NV2 register */
uint64_t vncr_el2;
+
+ uint64_t gcscr_el[4]; /* GCSCRE0_EL1, GCSCR_EL[123] */
+ uint64_t gcspr_el[4]; /* GCSPR_EL[0123] */
+
+ /* MEC registers */
+ uint64_t mecid_p0_el2;
+ uint64_t mecid_a0_el2;
+ uint64_t mecid_p1_el2;
+ uint64_t mecid_a1_el2;
+ uint64_t mecid_rl_a_el3;
+ uint64_t vmecid_p_el2;
+ uint64_t vmecid_a_el2;
} cp15;
struct {
@@ -633,13 +646,10 @@ typedef struct CPUArchState {
* entry process.
*/
struct {
- uint32_t syndrome; /* AArch64 format syndrome register */
- uint32_t fsr; /* AArch32 format fault status register info */
+ uint64_t syndrome; /* AArch64 format syndrome register */
uint64_t vaddress; /* virtual addr associated with exception, if any */
+ uint32_t fsr; /* AArch32 format fault status register info */
uint32_t target_el; /* EL the exception should be targeted for */
- /* If we implement EL2 we will also need to store information
- * about the intermediate physical address for stage 2 faults.
- */
} exception;
/* Information associated with an SError */
@@ -662,13 +672,11 @@ typedef struct CPUArchState {
struct {
ARMVectorReg zregs[32];
-#ifdef TARGET_AARCH64
/* Store FFR as pregs[16] to make it easier to treat as any other. */
#define FFR_PRED_NUM 16
ARMPredicateReg pregs[17];
/* Scratch space for aa64 sve predicate temporary. */
ARMPredicateReg preg_tmp;
-#endif
/* We store these fpcsr fields separately for convenience. */
uint32_t qc[4] QEMU_ALIGNED(16);
@@ -684,9 +692,6 @@ typedef struct CPUArchState {
uint32_t xregs[16];
- /* Scratch space for aa32 neon expansion. */
- uint32_t scratch[8];
-
/* There are a number of distinct float control structures. */
float_status fp_status[FPST_COUNT];
@@ -705,15 +710,6 @@ typedef struct CPUArchState {
*/
uint64_t exclusive_high;
- /* iwMMXt coprocessor state. */
- struct {
- uint64_t regs[16];
- uint64_t val;
-
- uint32_t cregs[16];
- } iwmmxt;
-
-#ifdef TARGET_AARCH64
struct {
ARMPACKey apia;
ARMPACKey apib;
@@ -724,28 +720,36 @@ typedef struct CPUArchState {
uint64_t scxtnum_el[4];
- /*
- * SME ZA storage -- 256 x 256 byte array, with bytes in host word order,
- * as we do with vfp.zregs[]. This corresponds to the architectural ZA
- * array, where ZA[N] is in the least-significant bytes of env->zarray[N].
- * When SVL is less than the architectural maximum, the accessible
- * storage is restricted, such that if the SVL is X bytes the guest can
- * see only the bottom X elements of zarray[], and only the least
- * significant X bytes of each element of the array. (In other words,
- * the observable part is always square.)
- *
- * The ZA storage can also be considered as a set of square tiles of
- * elements of different sizes. The mapping from tiles to the ZA array
- * is architecturally defined, such that for tiles of elements of esz
- * bytes, the Nth row (or "horizontal slice") of tile T is in
- * ZA[T + N * esz]. Note that this means that each tile is not contiguous
- * in the ZA storage, because its rows are striped through the ZA array.
- *
- * Because this is so large, keep this toward the end of the reset area,
- * to keep the offsets into the rest of the structure smaller.
- */
- ARMVectorReg zarray[ARM_MAX_VQ * 16];
-#endif
+ struct {
+ /* SME2 ZT0 -- 512 bit array, with data ordered like ARMVectorReg. */
+ uint64_t zt0[512 / 64] QEMU_ALIGNED(16);
+
+ /*
+ * SME ZA storage -- 256 x 256 byte array, with bytes in host
+ * word order, as we do with vfp.zregs[]. This corresponds to
+ * the architectural ZA array, where ZA[N] is in the least
+ * significant bytes of env->za_state.za[N].
+ *
+ * When SVL is less than the architectural maximum, the accessible
+ * storage is restricted, such that if the SVL is X bytes the guest
+ * can see only the bottom X elements of za[], and only the least
+ * significant X bytes of each element of the array. (In other words,
+ * the observable part is always square.)
+ *
+ * The ZA storage can also be considered as a set of square tiles of
+ * elements of different sizes. The mapping from tiles to the ZA array
+ * is architecturally defined, such that for tiles of elements of esz
+ * bytes, the Nth row (or "horizontal slice") of tile T is in
+ * ZA[T + N * esz]. Note that this means that each tile is not
+ * contiguous in the ZA storage, because its rows are striped through
+ * the ZA array.
+ *
+ * Because this is so large, keep this toward the end of the
+ * reset area, to keep the offsets into the rest of the structure
+ * smaller.
+ */
+ ARMVectorReg za[ARM_MAX_VQ * 16];
+ } za_state;
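A sketch of the striped tile addressing described above (hypothetical
helper, not part of this patch):

/* Hypothetical: the Nth horizontal slice of tile T, for esz-byte
 * elements, lives at za[T + N * esz] per the mapping above. */
static inline ARMVectorReg *za_tile_row(CPUARMState *env, int t, int n, int esz)
{
    return &env->za_state.za[t + n * esz];
}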
struct CPUBreakpoint *cpu_breakpoint[16];
struct CPUWatchpoint *cpu_watchpoint[16];
@@ -801,12 +805,9 @@ typedef struct CPUArchState {
#else /* CONFIG_USER_ONLY */
/* For usermode syscall translation. */
bool eabi;
-#endif /* CONFIG_USER_ONLY */
-
-#ifdef TARGET_TAGGED_ADDRESSES
/* Linux syscall tagged address support */
bool tagged_addr_enable;
-#endif
+#endif /* CONFIG_USER_ONLY */
} CPUARMState;
static inline void set_feature(CPUARMState *env, int feature)
@@ -855,6 +856,53 @@ typedef struct {
uint32_t map, init, supported;
} ARMVQMap;
+/* REG is ID_XXX */
+#define FIELD_DP64_IDREG(ISAR, REG, FIELD, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ uint64_t regval = i_->idregs[REG ## _EL1_IDX]; \
+ regval = FIELD_DP64(regval, REG, FIELD, VALUE); \
+ i_->idregs[REG ## _EL1_IDX] = regval; \
+ })
+
+#define FIELD_DP32_IDREG(ISAR, REG, FIELD, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ uint64_t regval = i_->idregs[REG ## _EL1_IDX]; \
+ regval = FIELD_DP32(regval, REG, FIELD, VALUE); \
+ i_->idregs[REG ## _EL1_IDX] = regval; \
+ })
+
+#define FIELD_EX64_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_EX64(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define FIELD_EX32_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_EX32(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define FIELD_SEX64_IDREG(ISAR, REG, FIELD) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ FIELD_SEX64(i_->idregs[REG ## _EL1_IDX], REG, FIELD); \
+ })
+
+#define SET_IDREG(ISAR, REG, VALUE) \
+ ({ \
+ ARMISARegisters *i_ = (ISAR); \
+ i_->idregs[REG ## _EL1_IDX] = VALUE; \
+ })
+
+#define GET_IDREG(ISAR, REG) \
+ ({ \
+ const ARMISARegisters *i_ = (ISAR); \
+ i_->idregs[REG ## _EL1_IDX]; \
+ })
+
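A usage sketch for the accessors above (hypothetical helper; assumes the
FIELD() definitions for ID_AA64PFR0, formerly in this file, are visible
from wherever this series moves them):

/* Hypothetical: read one ID register field through idregs[]. */
static inline bool sketch_cpu_has_sve(ARMCPU *cpu)
{
    return FIELD_EX64_IDREG(&cpu->isar, ID_AA64PFR0, SVE) != 0;
}
/* ...and FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, SVE, 1) would set it. */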
/**
* ARMCPU:
* @env: #CPUARMState
@@ -890,6 +938,7 @@ struct ArchCPU {
DynamicGDBFeatureInfo dyn_sysreg_feature;
DynamicGDBFeatureInfo dyn_svereg_feature;
+ DynamicGDBFeatureInfo dyn_smereg_feature;
DynamicGDBFeatureInfo dyn_m_systemreg_feature;
DynamicGDBFeatureInfo dyn_m_secextreg_feature;
@@ -973,7 +1022,6 @@ struct ArchCPU {
*/
uint32_t kvm_target;
-#ifdef CONFIG_KVM
/* KVM init features for this CPU */
uint32_t kvm_init_features[7];
@@ -986,7 +1034,6 @@ struct ArchCPU {
/* KVM steal time */
OnOffAuto kvm_steal_time;
-#endif /* CONFIG_KVM */
/* Uniprocessor system with MP extensions */
bool mp_is_up;
@@ -1025,44 +1072,14 @@ struct ArchCPU {
* field by reading the value from the KVM vCPU.
*/
struct ARMISARegisters {
- uint32_t id_isar0;
- uint32_t id_isar1;
- uint32_t id_isar2;
- uint32_t id_isar3;
- uint32_t id_isar4;
- uint32_t id_isar5;
- uint32_t id_isar6;
- uint32_t id_mmfr0;
- uint32_t id_mmfr1;
- uint32_t id_mmfr2;
- uint32_t id_mmfr3;
- uint32_t id_mmfr4;
- uint32_t id_mmfr5;
- uint32_t id_pfr0;
- uint32_t id_pfr1;
- uint32_t id_pfr2;
uint32_t mvfr0;
uint32_t mvfr1;
uint32_t mvfr2;
- uint32_t id_dfr0;
- uint32_t id_dfr1;
uint32_t dbgdidr;
uint32_t dbgdevid;
uint32_t dbgdevid1;
- uint64_t id_aa64isar0;
- uint64_t id_aa64isar1;
- uint64_t id_aa64isar2;
- uint64_t id_aa64pfr0;
- uint64_t id_aa64pfr1;
- uint64_t id_aa64mmfr0;
- uint64_t id_aa64mmfr1;
- uint64_t id_aa64mmfr2;
- uint64_t id_aa64mmfr3;
- uint64_t id_aa64dfr0;
- uint64_t id_aa64dfr1;
- uint64_t id_aa64zfr0;
- uint64_t id_aa64smfr0;
uint64_t reset_pmcr_el0;
+ uint64_t idregs[NUM_ID_IDX];
} isar;
uint64_t midr;
uint32_t revidr;
@@ -1071,10 +1088,6 @@ struct ArchCPU {
uint32_t reset_sctlr;
uint64_t pmceid0;
uint64_t pmceid1;
- uint32_t id_afr0;
- uint64_t id_aa64afr0;
- uint64_t id_aa64afr1;
- uint64_t clidr;
uint64_t mp_affinity; /* MP ID without feature bits */
/* The elements of this array are the CCSIDR values for each cache,
* in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
@@ -1125,6 +1138,7 @@ struct ArchCPU {
/* Used to set the maximum vector length the cpu will support. */
uint32_t sve_max_vq;
+ uint32_t sme_max_vq;
#ifdef CONFIG_USER_ONLY
/* Used to set the default vector length at process start. */
@@ -1143,7 +1157,7 @@ typedef struct ARMCPUInfo {
const char *name;
const char *deprecation_note;
void (*initfn)(Object *obj);
- void (*class_init)(ObjectClass *oc, void *data);
+ void (*class_init)(ObjectClass *oc, const void *data);
} ARMCPUInfo;
/**
@@ -1161,10 +1175,6 @@ struct ARMCPUClass {
ResettablePhases parent_phases;
};
-struct AArch64CPUClass {
- ARMCPUClass parent_class;
-};
-
/* Callback functions for the generic timer's timers. */
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
@@ -1177,8 +1187,6 @@ void arm_gt_sel2vtimer_cb(void *opaque);
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);
-void arm_cpu_post_init(Object *obj);
-
#define ARM_AFF0_SHIFT 0
#define ARM_AFF0_MASK (0xFFULL << ARM_AFF0_SHIFT)
#define ARM_AFF1_SHIFT 8
@@ -1236,7 +1244,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
*/
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);
-#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
@@ -1268,13 +1275,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
#endif
}
-#else
-static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
-static inline void aarch64_sve_change_el(CPUARMState *env, int o,
- int n, bool a)
-{ }
-#endif
-
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
@@ -1426,6 +1426,19 @@ void pmu_init(ARMCPU *cpu);
#define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */
#define SCTLR_TIDCP (1ULL << 63) /* FEAT_TIDCP1 */
+#define SCTLR2_EMEC (1ULL << 1) /* FEAT_MEC */
+#define SCTLR2_NMEA (1ULL << 2) /* FEAT_DoubleFault2 */
+#define SCTLR2_ENADERR (1ULL << 3) /* FEAT_ADERR */
+#define SCTLR2_ENANERR (1ULL << 4) /* FEAT_ANERR */
+#define SCTLR2_EASE (1ULL << 5) /* FEAT_DoubleFault2 */
+#define SCTLR2_ENIDCP128 (1ULL << 6) /* FEAT_SYSREG128 */
+#define SCTLR2_ENPACM (1ULL << 7) /* FEAT_PAuth_LR */
+#define SCTLR2_ENPACM0 (1ULL << 8) /* FEAT_PAuth_LR */
+#define SCTLR2_CPTA (1ULL << 9) /* FEAT_CPA2 */
+#define SCTLR2_CPTA0 (1ULL << 10) /* FEAT_CPA2 */
+#define SCTLR2_CPTM (1ULL << 11) /* FEAT_CPA2 */
+#define SCTLR2_CPTM0 (1ULL << 12) /* FEAT_CPA2 */
+
#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
@@ -1498,6 +1511,7 @@ void pmu_init(ARMCPU *cpu);
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
+#define PSTATE_EXLOCK (1ULL << 34)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE)
@@ -1516,6 +1530,7 @@ FIELD(SVCR, ZA, 1, 1)
/* Fields for SMCR_ELx. */
FIELD(SMCR, LEN, 0, 4)
+FIELD(SMCR, EZT0, 30, 1)
FIELD(SMCR, FA64, 31, 1)
/* Write a new value to v7m.exception, thus transitioning into or out
@@ -1533,7 +1548,7 @@ static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
* interprocessing, so we don't attempt to sync with the cpsr state used by
* the 32 bit decoder.
*/
-static inline uint32_t pstate_read(CPUARMState *env)
+static inline uint64_t pstate_read(CPUARMState *env)
{
int ZF;
@@ -1543,7 +1558,7 @@ static inline uint32_t pstate_read(CPUARMState *env)
| env->pstate | env->daif | (env->btype << 10);
}
-static inline void pstate_write(CPUARMState *env, uint32_t val)
+static inline void pstate_write(CPUARMState *env, uint64_t val)
{
env->ZF = (~val) & PSTATE_Z;
env->NF = val;
@@ -1715,11 +1730,24 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define SCR_ENAS0 (1ULL << 36)
#define SCR_ADEN (1ULL << 37)
#define SCR_HXEN (1ULL << 38)
+#define SCR_GCSEN (1ULL << 39)
#define SCR_TRNDR (1ULL << 40)
#define SCR_ENTP2 (1ULL << 41)
+#define SCR_TCR2EN (1ULL << 43)
+#define SCR_SCTLR2EN (1ULL << 44)
+#define SCR_PIEN (1ULL << 45)
#define SCR_GPF (1ULL << 48)
+#define SCR_MECEN (1ULL << 49)
#define SCR_NSE (1ULL << 62)
+/* GCSCR_ELx fields */
+#define GCSCR_PCRSEL (1ULL << 0)
+#define GCSCR_RVCHKEN (1ULL << 5)
+#define GCSCR_EXLOCKEN (1ULL << 6)
+#define GCSCR_PUSHMEN (1ULL << 8)
+#define GCSCR_STREN (1ULL << 9)
+#define GCSCRE0_NTR (1ULL << 10)
+
/* Return the current FPSCR value. */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);
@@ -1853,16 +1881,6 @@ enum arm_cpu_mode {
/* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */
#define QEMU_VFP_FPSCR_NZCV 0xffff
-/* iwMMXt coprocessor control registers. */
-#define ARM_IWMMXT_wCID 0
-#define ARM_IWMMXT_wCon 1
-#define ARM_IWMMXT_wCSSF 2
-#define ARM_IWMMXT_wCASF 3
-#define ARM_IWMMXT_wCGR0 8
-#define ARM_IWMMXT_wCGR1 9
-#define ARM_IWMMXT_wCGR2 10
-#define ARM_IWMMXT_wCGR3 11
-
/* V7M CCR bits */
FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
FIELD(V7M_CCR, USERSETMPEND, 1, 1)
@@ -2001,423 +2019,20 @@ FIELD(V7M_VPR, P0, 0, 16)
FIELD(V7M_VPR, MASK01, 16, 4)
FIELD(V7M_VPR, MASK23, 20, 4)
-/*
- * System register ID fields.
- */
-FIELD(CLIDR_EL1, CTYPE1, 0, 3)
-FIELD(CLIDR_EL1, CTYPE2, 3, 3)
-FIELD(CLIDR_EL1, CTYPE3, 6, 3)
-FIELD(CLIDR_EL1, CTYPE4, 9, 3)
-FIELD(CLIDR_EL1, CTYPE5, 12, 3)
-FIELD(CLIDR_EL1, CTYPE6, 15, 3)
-FIELD(CLIDR_EL1, CTYPE7, 18, 3)
-FIELD(CLIDR_EL1, LOUIS, 21, 3)
-FIELD(CLIDR_EL1, LOC, 24, 3)
-FIELD(CLIDR_EL1, LOUU, 27, 3)
-FIELD(CLIDR_EL1, ICB, 30, 3)
-
-/* When FEAT_CCIDX is implemented */
-FIELD(CCSIDR_EL1, CCIDX_LINESIZE, 0, 3)
-FIELD(CCSIDR_EL1, CCIDX_ASSOCIATIVITY, 3, 21)
-FIELD(CCSIDR_EL1, CCIDX_NUMSETS, 32, 24)
-
-/* When FEAT_CCIDX is not implemented */
-FIELD(CCSIDR_EL1, LINESIZE, 0, 3)
-FIELD(CCSIDR_EL1, ASSOCIATIVITY, 3, 10)
-FIELD(CCSIDR_EL1, NUMSETS, 13, 15)
-
-FIELD(CTR_EL0, IMINLINE, 0, 4)
-FIELD(CTR_EL0, L1IP, 14, 2)
-FIELD(CTR_EL0, DMINLINE, 16, 4)
-FIELD(CTR_EL0, ERG, 20, 4)
-FIELD(CTR_EL0, CWG, 24, 4)
-FIELD(CTR_EL0, IDC, 28, 1)
-FIELD(CTR_EL0, DIC, 29, 1)
-FIELD(CTR_EL0, TMINLINE, 32, 6)
-
-FIELD(MIDR_EL1, REVISION, 0, 4)
-FIELD(MIDR_EL1, PARTNUM, 4, 12)
-FIELD(MIDR_EL1, ARCHITECTURE, 16, 4)
-FIELD(MIDR_EL1, VARIANT, 20, 4)
-FIELD(MIDR_EL1, IMPLEMENTER, 24, 8)
-
-FIELD(ID_ISAR0, SWAP, 0, 4)
-FIELD(ID_ISAR0, BITCOUNT, 4, 4)
-FIELD(ID_ISAR0, BITFIELD, 8, 4)
-FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
-FIELD(ID_ISAR0, COPROC, 16, 4)
-FIELD(ID_ISAR0, DEBUG, 20, 4)
-FIELD(ID_ISAR0, DIVIDE, 24, 4)
-
-FIELD(ID_ISAR1, ENDIAN, 0, 4)
-FIELD(ID_ISAR1, EXCEPT, 4, 4)
-FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
-FIELD(ID_ISAR1, EXTEND, 12, 4)
-FIELD(ID_ISAR1, IFTHEN, 16, 4)
-FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
-FIELD(ID_ISAR1, INTERWORK, 24, 4)
-FIELD(ID_ISAR1, JAZELLE, 28, 4)
-
-FIELD(ID_ISAR2, LOADSTORE, 0, 4)
-FIELD(ID_ISAR2, MEMHINT, 4, 4)
-FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
-FIELD(ID_ISAR2, MULT, 12, 4)
-FIELD(ID_ISAR2, MULTS, 16, 4)
-FIELD(ID_ISAR2, MULTU, 20, 4)
-FIELD(ID_ISAR2, PSR_AR, 24, 4)
-FIELD(ID_ISAR2, REVERSAL, 28, 4)
-
-FIELD(ID_ISAR3, SATURATE, 0, 4)
-FIELD(ID_ISAR3, SIMD, 4, 4)
-FIELD(ID_ISAR3, SVC, 8, 4)
-FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
-FIELD(ID_ISAR3, TABBRANCH, 16, 4)
-FIELD(ID_ISAR3, T32COPY, 20, 4)
-FIELD(ID_ISAR3, TRUENOP, 24, 4)
-FIELD(ID_ISAR3, T32EE, 28, 4)
-
-FIELD(ID_ISAR4, UNPRIV, 0, 4)
-FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
-FIELD(ID_ISAR4, WRITEBACK, 8, 4)
-FIELD(ID_ISAR4, SMC, 12, 4)
-FIELD(ID_ISAR4, BARRIER, 16, 4)
-FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
-FIELD(ID_ISAR4, PSR_M, 24, 4)
-FIELD(ID_ISAR4, SWP_FRAC, 28, 4)
-
-FIELD(ID_ISAR5, SEVL, 0, 4)
-FIELD(ID_ISAR5, AES, 4, 4)
-FIELD(ID_ISAR5, SHA1, 8, 4)
-FIELD(ID_ISAR5, SHA2, 12, 4)
-FIELD(ID_ISAR5, CRC32, 16, 4)
-FIELD(ID_ISAR5, RDM, 24, 4)
-FIELD(ID_ISAR5, VCMA, 28, 4)
-
-FIELD(ID_ISAR6, JSCVT, 0, 4)
-FIELD(ID_ISAR6, DP, 4, 4)
-FIELD(ID_ISAR6, FHM, 8, 4)
-FIELD(ID_ISAR6, SB, 12, 4)
-FIELD(ID_ISAR6, SPECRES, 16, 4)
-FIELD(ID_ISAR6, BF16, 20, 4)
-FIELD(ID_ISAR6, I8MM, 24, 4)
-
-FIELD(ID_MMFR0, VMSA, 0, 4)
-FIELD(ID_MMFR0, PMSA, 4, 4)
-FIELD(ID_MMFR0, OUTERSHR, 8, 4)
-FIELD(ID_MMFR0, SHARELVL, 12, 4)
-FIELD(ID_MMFR0, TCM, 16, 4)
-FIELD(ID_MMFR0, AUXREG, 20, 4)
-FIELD(ID_MMFR0, FCSE, 24, 4)
-FIELD(ID_MMFR0, INNERSHR, 28, 4)
-
-FIELD(ID_MMFR1, L1HVDVA, 0, 4)
-FIELD(ID_MMFR1, L1UNIVA, 4, 4)
-FIELD(ID_MMFR1, L1HVDSW, 8, 4)
-FIELD(ID_MMFR1, L1UNISW, 12, 4)
-FIELD(ID_MMFR1, L1HVD, 16, 4)
-FIELD(ID_MMFR1, L1UNI, 20, 4)
-FIELD(ID_MMFR1, L1TSTCLN, 24, 4)
-FIELD(ID_MMFR1, BPRED, 28, 4)
-
-FIELD(ID_MMFR2, L1HVDFG, 0, 4)
-FIELD(ID_MMFR2, L1HVDBG, 4, 4)
-FIELD(ID_MMFR2, L1HVDRNG, 8, 4)
-FIELD(ID_MMFR2, HVDTLB, 12, 4)
-FIELD(ID_MMFR2, UNITLB, 16, 4)
-FIELD(ID_MMFR2, MEMBARR, 20, 4)
-FIELD(ID_MMFR2, WFISTALL, 24, 4)
-FIELD(ID_MMFR2, HWACCFLG, 28, 4)
-
-FIELD(ID_MMFR3, CMAINTVA, 0, 4)
-FIELD(ID_MMFR3, CMAINTSW, 4, 4)
-FIELD(ID_MMFR3, BPMAINT, 8, 4)
-FIELD(ID_MMFR3, MAINTBCST, 12, 4)
-FIELD(ID_MMFR3, PAN, 16, 4)
-FIELD(ID_MMFR3, COHWALK, 20, 4)
-FIELD(ID_MMFR3, CMEMSZ, 24, 4)
-FIELD(ID_MMFR3, SUPERSEC, 28, 4)
-
-FIELD(ID_MMFR4, SPECSEI, 0, 4)
-FIELD(ID_MMFR4, AC2, 4, 4)
-FIELD(ID_MMFR4, XNX, 8, 4)
-FIELD(ID_MMFR4, CNP, 12, 4)
-FIELD(ID_MMFR4, HPDS, 16, 4)
-FIELD(ID_MMFR4, LSM, 20, 4)
-FIELD(ID_MMFR4, CCIDX, 24, 4)
-FIELD(ID_MMFR4, EVT, 28, 4)
-
-FIELD(ID_MMFR5, ETS, 0, 4)
-FIELD(ID_MMFR5, NTLBPA, 4, 4)
-
-FIELD(ID_PFR0, STATE0, 0, 4)
-FIELD(ID_PFR0, STATE1, 4, 4)
-FIELD(ID_PFR0, STATE2, 8, 4)
-FIELD(ID_PFR0, STATE3, 12, 4)
-FIELD(ID_PFR0, CSV2, 16, 4)
-FIELD(ID_PFR0, AMU, 20, 4)
-FIELD(ID_PFR0, DIT, 24, 4)
-FIELD(ID_PFR0, RAS, 28, 4)
-
-FIELD(ID_PFR1, PROGMOD, 0, 4)
-FIELD(ID_PFR1, SECURITY, 4, 4)
-FIELD(ID_PFR1, MPROGMOD, 8, 4)
-FIELD(ID_PFR1, VIRTUALIZATION, 12, 4)
-FIELD(ID_PFR1, GENTIMER, 16, 4)
-FIELD(ID_PFR1, SEC_FRAC, 20, 4)
-FIELD(ID_PFR1, VIRT_FRAC, 24, 4)
-FIELD(ID_PFR1, GIC, 28, 4)
-
-FIELD(ID_PFR2, CSV3, 0, 4)
-FIELD(ID_PFR2, SSBS, 4, 4)
-FIELD(ID_PFR2, RAS_FRAC, 8, 4)
-
-FIELD(ID_AA64ISAR0, AES, 4, 4)
-FIELD(ID_AA64ISAR0, SHA1, 8, 4)
-FIELD(ID_AA64ISAR0, SHA2, 12, 4)
-FIELD(ID_AA64ISAR0, CRC32, 16, 4)
-FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
-FIELD(ID_AA64ISAR0, TME, 24, 4)
-FIELD(ID_AA64ISAR0, RDM, 28, 4)
-FIELD(ID_AA64ISAR0, SHA3, 32, 4)
-FIELD(ID_AA64ISAR0, SM3, 36, 4)
-FIELD(ID_AA64ISAR0, SM4, 40, 4)
-FIELD(ID_AA64ISAR0, DP, 44, 4)
-FIELD(ID_AA64ISAR0, FHM, 48, 4)
-FIELD(ID_AA64ISAR0, TS, 52, 4)
-FIELD(ID_AA64ISAR0, TLB, 56, 4)
-FIELD(ID_AA64ISAR0, RNDR, 60, 4)
-
-FIELD(ID_AA64ISAR1, DPB, 0, 4)
-FIELD(ID_AA64ISAR1, APA, 4, 4)
-FIELD(ID_AA64ISAR1, API, 8, 4)
-FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
-FIELD(ID_AA64ISAR1, FCMA, 16, 4)
-FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
-FIELD(ID_AA64ISAR1, GPA, 24, 4)
-FIELD(ID_AA64ISAR1, GPI, 28, 4)
-FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
-FIELD(ID_AA64ISAR1, SB, 36, 4)
-FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
-FIELD(ID_AA64ISAR1, BF16, 44, 4)
-FIELD(ID_AA64ISAR1, DGH, 48, 4)
-FIELD(ID_AA64ISAR1, I8MM, 52, 4)
-FIELD(ID_AA64ISAR1, XS, 56, 4)
-FIELD(ID_AA64ISAR1, LS64, 60, 4)
-
-FIELD(ID_AA64ISAR2, WFXT, 0, 4)
-FIELD(ID_AA64ISAR2, RPRES, 4, 4)
-FIELD(ID_AA64ISAR2, GPA3, 8, 4)
-FIELD(ID_AA64ISAR2, APA3, 12, 4)
-FIELD(ID_AA64ISAR2, MOPS, 16, 4)
-FIELD(ID_AA64ISAR2, BC, 20, 4)
-FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4)
-FIELD(ID_AA64ISAR2, CLRBHB, 28, 4)
-FIELD(ID_AA64ISAR2, SYSREG_128, 32, 4)
-FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4)
-FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4)
-FIELD(ID_AA64ISAR2, RPRFM, 48, 4)
-FIELD(ID_AA64ISAR2, CSSC, 52, 4)
-FIELD(ID_AA64ISAR2, ATS1A, 60, 4)
-
-FIELD(ID_AA64PFR0, EL0, 0, 4)
-FIELD(ID_AA64PFR0, EL1, 4, 4)
-FIELD(ID_AA64PFR0, EL2, 8, 4)
-FIELD(ID_AA64PFR0, EL3, 12, 4)
-FIELD(ID_AA64PFR0, FP, 16, 4)
-FIELD(ID_AA64PFR0, ADVSIMD, 20, 4)
-FIELD(ID_AA64PFR0, GIC, 24, 4)
-FIELD(ID_AA64PFR0, RAS, 28, 4)
-FIELD(ID_AA64PFR0, SVE, 32, 4)
-FIELD(ID_AA64PFR0, SEL2, 36, 4)
-FIELD(ID_AA64PFR0, MPAM, 40, 4)
-FIELD(ID_AA64PFR0, AMU, 44, 4)
-FIELD(ID_AA64PFR0, DIT, 48, 4)
-FIELD(ID_AA64PFR0, RME, 52, 4)
-FIELD(ID_AA64PFR0, CSV2, 56, 4)
-FIELD(ID_AA64PFR0, CSV3, 60, 4)
-
-FIELD(ID_AA64PFR1, BT, 0, 4)
-FIELD(ID_AA64PFR1, SSBS, 4, 4)
-FIELD(ID_AA64PFR1, MTE, 8, 4)
-FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4)
-FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4)
-FIELD(ID_AA64PFR1, SME, 24, 4)
-FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4)
-FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4)
-FIELD(ID_AA64PFR1, NMI, 36, 4)
-FIELD(ID_AA64PFR1, MTE_FRAC, 40, 4)
-FIELD(ID_AA64PFR1, GCS, 44, 4)
-FIELD(ID_AA64PFR1, THE, 48, 4)
-FIELD(ID_AA64PFR1, MTEX, 52, 4)
-FIELD(ID_AA64PFR1, DF2, 56, 4)
-FIELD(ID_AA64PFR1, PFAR, 60, 4)
-
-FIELD(ID_AA64MMFR0, PARANGE, 0, 4)
-FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4)
-FIELD(ID_AA64MMFR0, BIGEND, 8, 4)
-FIELD(ID_AA64MMFR0, SNSMEM, 12, 4)
-FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4)
-FIELD(ID_AA64MMFR0, TGRAN16, 20, 4)
-FIELD(ID_AA64MMFR0, TGRAN64, 24, 4)
-FIELD(ID_AA64MMFR0, TGRAN4, 28, 4)
-FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4)
-FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4)
-FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4)
-FIELD(ID_AA64MMFR0, EXS, 44, 4)
-FIELD(ID_AA64MMFR0, FGT, 56, 4)
-FIELD(ID_AA64MMFR0, ECV, 60, 4)
-
-FIELD(ID_AA64MMFR1, HAFDBS, 0, 4)
-FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4)
-FIELD(ID_AA64MMFR1, VH, 8, 4)
-FIELD(ID_AA64MMFR1, HPDS, 12, 4)
-FIELD(ID_AA64MMFR1, LO, 16, 4)
-FIELD(ID_AA64MMFR1, PAN, 20, 4)
-FIELD(ID_AA64MMFR1, SPECSEI, 24, 4)
-FIELD(ID_AA64MMFR1, XNX, 28, 4)
-FIELD(ID_AA64MMFR1, TWED, 32, 4)
-FIELD(ID_AA64MMFR1, ETS, 36, 4)
-FIELD(ID_AA64MMFR1, HCX, 40, 4)
-FIELD(ID_AA64MMFR1, AFP, 44, 4)
-FIELD(ID_AA64MMFR1, NTLBPA, 48, 4)
-FIELD(ID_AA64MMFR1, TIDCP1, 52, 4)
-FIELD(ID_AA64MMFR1, CMOW, 56, 4)
-FIELD(ID_AA64MMFR1, ECBHB, 60, 4)
-
-FIELD(ID_AA64MMFR2, CNP, 0, 4)
-FIELD(ID_AA64MMFR2, UAO, 4, 4)
-FIELD(ID_AA64MMFR2, LSM, 8, 4)
-FIELD(ID_AA64MMFR2, IESB, 12, 4)
-FIELD(ID_AA64MMFR2, VARANGE, 16, 4)
-FIELD(ID_AA64MMFR2, CCIDX, 20, 4)
-FIELD(ID_AA64MMFR2, NV, 24, 4)
-FIELD(ID_AA64MMFR2, ST, 28, 4)
-FIELD(ID_AA64MMFR2, AT, 32, 4)
-FIELD(ID_AA64MMFR2, IDS, 36, 4)
-FIELD(ID_AA64MMFR2, FWB, 40, 4)
-FIELD(ID_AA64MMFR2, TTL, 48, 4)
-FIELD(ID_AA64MMFR2, BBM, 52, 4)
-FIELD(ID_AA64MMFR2, EVT, 56, 4)
-FIELD(ID_AA64MMFR2, E0PD, 60, 4)
-
-FIELD(ID_AA64MMFR3, TCRX, 0, 4)
-FIELD(ID_AA64MMFR3, SCTLRX, 4, 4)
-FIELD(ID_AA64MMFR3, S1PIE, 8, 4)
-FIELD(ID_AA64MMFR3, S2PIE, 12, 4)
-FIELD(ID_AA64MMFR3, S1POE, 16, 4)
-FIELD(ID_AA64MMFR3, S2POE, 20, 4)
-FIELD(ID_AA64MMFR3, AIE, 24, 4)
-FIELD(ID_AA64MMFR3, MEC, 28, 4)
-FIELD(ID_AA64MMFR3, D128, 32, 4)
-FIELD(ID_AA64MMFR3, D128_2, 36, 4)
-FIELD(ID_AA64MMFR3, SNERR, 40, 4)
-FIELD(ID_AA64MMFR3, ANERR, 44, 4)
-FIELD(ID_AA64MMFR3, SDERR, 52, 4)
-FIELD(ID_AA64MMFR3, ADERR, 56, 4)
-FIELD(ID_AA64MMFR3, SPEC_FPACC, 60, 4)
-
-FIELD(ID_AA64DFR0, DEBUGVER, 0, 4)
-FIELD(ID_AA64DFR0, TRACEVER, 4, 4)
-FIELD(ID_AA64DFR0, PMUVER, 8, 4)
-FIELD(ID_AA64DFR0, BRPS, 12, 4)
-FIELD(ID_AA64DFR0, PMSS, 16, 4)
-FIELD(ID_AA64DFR0, WRPS, 20, 4)
-FIELD(ID_AA64DFR0, SEBEP, 24, 4)
-FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4)
-FIELD(ID_AA64DFR0, PMSVER, 32, 4)
-FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4)
-FIELD(ID_AA64DFR0, TRACEFILT, 40, 4)
-FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4)
-FIELD(ID_AA64DFR0, MTPMU, 48, 4)
-FIELD(ID_AA64DFR0, BRBE, 52, 4)
-FIELD(ID_AA64DFR0, EXTTRCBUFF, 56, 4)
-FIELD(ID_AA64DFR0, HPMN0, 60, 4)
-
-FIELD(ID_AA64ZFR0, SVEVER, 0, 4)
-FIELD(ID_AA64ZFR0, AES, 4, 4)
-FIELD(ID_AA64ZFR0, BITPERM, 16, 4)
-FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4)
-FIELD(ID_AA64ZFR0, B16B16, 24, 4)
-FIELD(ID_AA64ZFR0, SHA3, 32, 4)
-FIELD(ID_AA64ZFR0, SM4, 40, 4)
-FIELD(ID_AA64ZFR0, I8MM, 44, 4)
-FIELD(ID_AA64ZFR0, F32MM, 52, 4)
-FIELD(ID_AA64ZFR0, F64MM, 56, 4)
-
-FIELD(ID_AA64SMFR0, F32F32, 32, 1)
-FIELD(ID_AA64SMFR0, BI32I32, 33, 1)
-FIELD(ID_AA64SMFR0, B16F32, 34, 1)
-FIELD(ID_AA64SMFR0, F16F32, 35, 1)
-FIELD(ID_AA64SMFR0, I8I32, 36, 4)
-FIELD(ID_AA64SMFR0, F16F16, 42, 1)
-FIELD(ID_AA64SMFR0, B16B16, 43, 1)
-FIELD(ID_AA64SMFR0, I16I32, 44, 4)
-FIELD(ID_AA64SMFR0, F64F64, 48, 1)
-FIELD(ID_AA64SMFR0, I16I64, 52, 4)
-FIELD(ID_AA64SMFR0, SMEVER, 56, 4)
-FIELD(ID_AA64SMFR0, FA64, 63, 1)
-
-FIELD(ID_DFR0, COPDBG, 0, 4)
-FIELD(ID_DFR0, COPSDBG, 4, 4)
-FIELD(ID_DFR0, MMAPDBG, 8, 4)
-FIELD(ID_DFR0, COPTRC, 12, 4)
-FIELD(ID_DFR0, MMAPTRC, 16, 4)
-FIELD(ID_DFR0, MPROFDBG, 20, 4)
-FIELD(ID_DFR0, PERFMON, 24, 4)
-FIELD(ID_DFR0, TRACEFILT, 28, 4)
-
-FIELD(ID_DFR1, MTPMU, 0, 4)
-FIELD(ID_DFR1, HPMN0, 4, 4)
-
-FIELD(DBGDIDR, SE_IMP, 12, 1)
-FIELD(DBGDIDR, NSUHD_IMP, 14, 1)
-FIELD(DBGDIDR, VERSION, 16, 4)
-FIELD(DBGDIDR, CTX_CMPS, 20, 4)
-FIELD(DBGDIDR, BRPS, 24, 4)
-FIELD(DBGDIDR, WRPS, 28, 4)
-
-FIELD(DBGDEVID, PCSAMPLE, 0, 4)
-FIELD(DBGDEVID, WPADDRMASK, 4, 4)
-FIELD(DBGDEVID, BPADDRMASK, 8, 4)
-FIELD(DBGDEVID, VECTORCATCH, 12, 4)
-FIELD(DBGDEVID, VIRTEXTNS, 16, 4)
-FIELD(DBGDEVID, DOUBLELOCK, 20, 4)
-FIELD(DBGDEVID, AUXREGS, 24, 4)
-FIELD(DBGDEVID, CIDMASK, 28, 4)
-
-FIELD(DBGDEVID1, PCSROFFSET, 0, 4)
-
-FIELD(MVFR0, SIMDREG, 0, 4)
-FIELD(MVFR0, FPSP, 4, 4)
-FIELD(MVFR0, FPDP, 8, 4)
-FIELD(MVFR0, FPTRAP, 12, 4)
-FIELD(MVFR0, FPDIVIDE, 16, 4)
-FIELD(MVFR0, FPSQRT, 20, 4)
-FIELD(MVFR0, FPSHVEC, 24, 4)
-FIELD(MVFR0, FPROUND, 28, 4)
-
-FIELD(MVFR1, FPFTZ, 0, 4)
-FIELD(MVFR1, FPDNAN, 4, 4)
-FIELD(MVFR1, SIMDLS, 8, 4) /* A-profile only */
-FIELD(MVFR1, SIMDINT, 12, 4) /* A-profile only */
-FIELD(MVFR1, SIMDSP, 16, 4) /* A-profile only */
-FIELD(MVFR1, SIMDHP, 20, 4) /* A-profile only */
-FIELD(MVFR1, MVE, 8, 4) /* M-profile only */
-FIELD(MVFR1, FP16, 20, 4) /* M-profile only */
-FIELD(MVFR1, FPHP, 24, 4)
-FIELD(MVFR1, SIMDFMAC, 28, 4)
-
-FIELD(MVFR2, SIMDMISC, 0, 4)
-FIELD(MVFR2, FPMISC, 4, 4)
-
FIELD(GPCCR, PPS, 0, 3)
+FIELD(GPCCR, RLPAD, 5, 1)
+FIELD(GPCCR, NSPAD, 6, 1)
+FIELD(GPCCR, SPAD, 7, 1)
FIELD(GPCCR, IRGN, 8, 2)
FIELD(GPCCR, ORGN, 10, 2)
FIELD(GPCCR, SH, 12, 2)
FIELD(GPCCR, PGS, 14, 2)
FIELD(GPCCR, GPC, 16, 1)
FIELD(GPCCR, GPCP, 17, 1)
+FIELD(GPCCR, TBGPCD, 18, 1)
+FIELD(GPCCR, NSO, 19, 1)
FIELD(GPCCR, L0GPTSZ, 20, 4)
+FIELD(GPCCR, APPSAA, 24, 1)
FIELD(MFAR, FPA, 12, 40)
FIELD(MFAR, NSE, 62, 1)
@@ -2431,8 +2046,6 @@ QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
*/
enum arm_features {
ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */
- ARM_FEATURE_XSCALE, /* Intel XScale extensions. */
- ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. */
ARM_FEATURE_V6,
ARM_FEATURE_V6K,
ARM_FEATURE_V7,
@@ -2633,6 +2246,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
*/
uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
uint64_t arm_hcr_el2_eff(CPUARMState *env);
+uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env);
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
/*
@@ -2712,212 +2326,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
#define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU
-/* ARM has the following "translation regimes" (as the ARM ARM calls them):
- *
- * If EL3 is 64-bit:
- * + NonSecure EL1 & 0 stage 1
- * + NonSecure EL1 & 0 stage 2
- * + NonSecure EL2
- * + NonSecure EL2 & 0 (ARMv8.1-VHE)
- * + Secure EL1 & 0 stage 1
- * + Secure EL1 & 0 stage 2 (FEAT_SEL2)
- * + Secure EL2 (FEAT_SEL2)
- * + Secure EL2 & 0 (FEAT_SEL2)
- * + Realm EL1 & 0 stage 1 (FEAT_RME)
- * + Realm EL1 & 0 stage 2 (FEAT_RME)
- * + Realm EL2 (FEAT_RME)
- * + EL3
- * If EL3 is 32-bit:
- * + NonSecure PL1 & 0 stage 1
- * + NonSecure PL1 & 0 stage 2
- * + NonSecure PL2
- * + Secure PL1 & 0
- * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
- *
- * For QEMU, an mmu_idx is not quite the same as a translation regime because:
- * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes,
- * because they may differ in access permissions even if the VA->PA map is
- * the same
- * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
- * translation, which means that we have one mmu_idx that deals with two
- * concatenated translation regimes [this sort of combined s1+2 TLB is
- * architecturally permitted]
- * 3. we don't need to allocate an mmu_idx to translations that we won't be
- * handling via the TLB. The only way to do a stage 1 translation without
- * the immediate stage 2 translation is via the ATS or AT system insns,
- * which can be slow-pathed and always do a page table walk.
- * The only use of stage 2 translations is either as part of an s1+2
- * lookup or when loading the descriptors during a stage 1 page table walk,
- * and in both those cases we don't use the TLB.
- * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
- * translation regimes, because they map reasonably well to each other
- * and they can't both be active at the same time.
- * 5. we want to be able to use the TLB for accesses done as part of a
- * stage1 page table walk, rather than having to walk the stage2 page
- * table over and over.
- * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
- * Never (PAN) bit within PSTATE.
- * 7. we fold together most secure and non-secure regimes for A-profile,
- * because there are no banked system registers for aarch64, so the
- * process of switching between secure and non-secure is
- * already heavyweight.
- * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
- * because both are in use simultaneously for Secure EL2.
- *
- * This gives us the following list of cases:
- *
- * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
- * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
- * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN)
- * EL0 EL2&0
- * EL2 EL2&0
- * EL2 EL2&0 +PAN
- * EL2 (aka NS PL2)
- * EL3 (aka AArch32 S PL1 PL1&0)
- * AArch32 S PL0 PL1&0 (we call this EL30_0)
- * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
- * Stage2 Secure
- * Stage2 NonSecure
- * plus one TLB per Physical address space: S, NS, Realm, Root
- *
- * for a total of 16 different mmu_idx.
- *
- * R profile CPUs have an MPU, but can use the same set of MMU indexes
- * as A profile. They only need to distinguish EL0 and EL1 (and
- * EL2 for cores like the Cortex-R52).
- *
- * M profile CPUs are rather different as they do not have a true MMU.
- * They have the following different MMU indexes:
- * User
- * Privileged
- * User, execution priority negative (ie the MPU HFNMIENA bit may apply)
- * Privileged, execution priority negative (ditto)
- * If the CPU supports the v8M Security Extension then there are also:
- * Secure User
- * Secure Privileged
- * Secure User, execution priority negative
- * Secure Privileged, execution priority negative
- *
- * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
- * are not quite the same -- different CPU types (most notably M profile
- * vs A/R profile) would like to use MMU indexes with different semantics,
- * but since we don't ever need to use all of those in a single CPU we
- * can avoid having to set NB_MMU_MODES to "total number of A profile MMU
- * modes + total number of M profile MMU modes". The lower bits of
- * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
- * the same for any particular CPU.
- * Variables of type ARMMUIdx are always full values, and the core
- * index values are in variables of type 'int'.
- *
- * Our enumeration includes at the end some entries which are not "true"
- * mmu_idx values in that they don't have corresponding TLBs and are only
- * valid for doing slow path page table walks.
- *
- * The constant names here are patterned after the general style of the names
- * of the AT/ATS operations.
- * The values used are carefully arranged to make mmu_idx => EL lookup easy.
- * For M profile we arrange them to have a bit for priv, a bit for negpri
- * and a bit for secure.
- */
-#define ARM_MMU_IDX_A 0x10 /* A profile */
-#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
-#define ARM_MMU_IDX_M 0x40 /* M profile */
-
-/* Meanings of the bits for M profile mmu idx values */
-#define ARM_MMU_IDX_M_PRIV 0x1
-#define ARM_MMU_IDX_M_NEGPRI 0x2
-#define ARM_MMU_IDX_M_S 0x4 /* Secure */
-
-#define ARM_MMU_IDX_TYPE_MASK \
- (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB)
-#define ARM_MMU_IDX_COREIDX_MASK 0xf
-
-typedef enum ARMMMUIdx {
- /*
- * A-profile.
- */
- ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A,
- ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A,
- ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
- ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
- ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
- ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
- ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,
-
- /*
- * Used for second stage of an S12 page table walk, or for descriptor
- * loads during first stage of an S1 page table walk. Note that both
- * are in use simultaneously for SecureEL2: the security state for
- * the S2 ptw is selected by the NS bit from the S1 ptw.
- */
- ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
-
- /* TLBs with 1-1 mapping to the physical address spaces. */
- ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,
-
- /*
- * These are not allocated TLBs and are used only for AT system
- * instructions or for the first stage of an S12 page table walk.
- */
- ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB,
- ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB,
-
- /*
- * M-profile.
- */
- ARMMMUIdx_MUser = ARM_MMU_IDX_M,
- ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV,
- ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI,
- ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI,
- ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S,
- ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S,
- ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S,
- ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S,
-} ARMMMUIdx;
-
-/*
- * Bit macros for the core-mmu-index values for each index,
- * for use when calling tlb_flush_by_mmuidx() and friends.
- */
-#define TO_CORE_BIT(NAME) \
- ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)
-
-typedef enum ARMMMUIdxBit {
- TO_CORE_BIT(E10_0),
- TO_CORE_BIT(E20_0),
- TO_CORE_BIT(E10_1),
- TO_CORE_BIT(E10_1_PAN),
- TO_CORE_BIT(E2),
- TO_CORE_BIT(E20_2),
- TO_CORE_BIT(E20_2_PAN),
- TO_CORE_BIT(E3),
- TO_CORE_BIT(E30_0),
- TO_CORE_BIT(E30_3_PAN),
- TO_CORE_BIT(Stage2),
- TO_CORE_BIT(Stage2_S),
-
- TO_CORE_BIT(MUser),
- TO_CORE_BIT(MPriv),
- TO_CORE_BIT(MUserNegPri),
- TO_CORE_BIT(MPrivNegPri),
- TO_CORE_BIT(MSUser),
- TO_CORE_BIT(MSPriv),
- TO_CORE_BIT(MSUserNegPri),
- TO_CORE_BIT(MSPrivNegPri),
-} ARMMMUIdxBit;
-
-#undef TO_CORE_BIT
-
-#define MMU_USER_IDX 0
-
/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
ARMASIdx_NS = 0,
@@ -2948,7 +2356,7 @@ static inline bool arm_v7m_csselr_razwi(ARMCPU *cpu)
/* If all the CLIDR.Ctypem bits are 0 there are no caches, and
* CSSELR is RAZ/WI.
*/
- return (cpu->clidr & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
+ return (GET_IDREG(&cpu->isar, CLIDR) & R_V7M_CLIDR_CTYPE_ALL_MASK) != 0;
}
static inline bool arm_sctlr_b(CPUARMState *env)
@@ -2966,19 +2374,15 @@ static inline bool arm_sctlr_b(CPUARMState *env)
uint64_t arm_sctlr(CPUARMState *env, int el);
-#include "exec/cpu-all.h"
-
/*
* We have more than 32-bits worth of state per TB, so we split the data
* between tb->flags and tb->cs_base, which is otherwise unused for ARM.
* We collect these two parts in CPUARMTBFlags where they are named
* flags and flags2 respectively.
*
- * The flags that are shared between all execution modes, TBFLAG_ANY,
- * are stored in flags. The flags that are specific to a given mode
- * are stores in flags2. Since cs_base is sized on the configured
- * address size, flags2 always has 64-bits for A64, and a minimum of
- * 32-bits for A32 and M32.
+ * The flags that are shared between all execution modes, TBFLAG_ANY, are stored
+ * in flags. The flags that are specific to a given mode are stored in flags2.
+ * flags2 always has 64 bits, even though only 32 bits are used for A32 and M32.
*
* The bits for 32-bit A-profile and M-profile partially overlap:
*
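A minimal sketch of that split (hypothetical helper, not QEMU's actual
accessors):

/* Hypothetical: place the two halves where the comment says they go. */
static inline void sketch_pack_tb_flags(CPUARMTBFlags f,
                                        uint32_t *tb_flags, uint64_t *cs_base)
{
    *tb_flags = f.flags;    /* TBFLAG_ANY bits, shared by all modes */
    *cs_base  = f.flags2;   /* mode-specific bits; full 64 bits */
}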
@@ -3016,13 +2420,6 @@ FIELD(TBFLAG_AM32, THUMB, 23, 1) /* Not cached. */
*/
FIELD(TBFLAG_A32, VECLEN, 0, 3) /* Not cached. */
FIELD(TBFLAG_A32, VECSTRIDE, 3, 2) /* Not cached. */
-/*
- * We store the bottom two bits of the CPAR as TB flags and handle
- * checks on the other bits at runtime. This shares the same bits as
- * VECSTRIDE, which is OK as no XScale CPU has VFP.
- * Not cached, because VECLEN+VECSTRIDE are not cached.
- */
-FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2)
FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Partially cached, minus FPEXC. */
FIELD(TBFLAG_A32, SCTLR__B, 8, 1) /* Cannot overlap with SCTLR_B */
FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1)
@@ -3084,12 +2481,15 @@ FIELD(TBFLAG_A64, ATA0, 31, 1)
FIELD(TBFLAG_A64, NV, 32, 1)
FIELD(TBFLAG_A64, NV1, 33, 1)
FIELD(TBFLAG_A64, NV2, 34, 1)
-/* Set if FEAT_NV2 RAM accesses use the EL2&0 translation regime */
-FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1)
+FIELD(TBFLAG_A64, E2H, 35, 1)
/* Set if FEAT_NV2 RAM accesses are big-endian */
FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)
FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */
FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */
+FIELD(TBFLAG_A64, ZT0EXC_EL, 39, 2)
+FIELD(TBFLAG_A64, GCS_EN, 41, 1)
+FIELD(TBFLAG_A64, GCS_RVCEN, 42, 1)
+FIELD(TBFLAG_A64, GCSSTR_EL, 43, 2)
/*
* Helpers for using the above. Note that only the A64 accessors use
@@ -3151,9 +2551,6 @@ static inline bool bswap_code(bool sctlr_b)
#endif
}
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-
enum {
QEMU_PSCI_CONDUIT_DISABLED = 0,
QEMU_PSCI_CONDUIT_SMC = 1,
@@ -3245,41 +2642,9 @@ extern const uint64_t pred_esz_masks[5];
*/
#define PAGE_BTI PAGE_TARGET_1
#define PAGE_MTE PAGE_TARGET_2
-#define PAGE_TARGET_STICKY PAGE_MTE
/* We associate one allocation tag per 16 bytes, the minimum. */
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
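Given those granule definitions, a small sketch (hypothetical helper) of
how many allocation tags cover a byte range:

/* Hypothetical: one tag per TAG_GRANULE (16) bytes, rounding up. */
static inline size_t sketch_num_tags(size_t len)
{
    return (len + TAG_GRANULE - 1) >> LOG2_TAG_GRANULE;
}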
-#ifdef CONFIG_USER_ONLY
-
-#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
-
-#ifdef TARGET_TAGGED_ADDRESSES
-/**
- * cpu_untagged_addr:
- * @cs: CPU context
- * @x: tagged address
- *
- * Remove any address tag from @x. This is explicitly related to the
- * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
- *
- * There should be a better place to put this, but we need this in
- * include/exec/cpu_ldst.h, and not some place linux-user specific.
- */
-static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
-{
- CPUARMState *env = cpu_env(cs);
- if (env->tagged_addr_enable) {
- /*
- * TBI is enabled for userspace but not kernelspace addresses.
- * Only clear the tag if bit 55 is clear.
- */
- x &= sextract64(x, 0, 56);
- }
- return x;
-}
-#endif /* TARGET_TAGGED_ADDRESSES */
-#endif /* CONFIG_USER_ONLY */
-
#endif