Diffstat (limited to 'target/arm/cpu.h')
-rw-r--r-- target/arm/cpu.h | 405
1 file changed, 124 insertions(+), 281 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a12859f..302c24e 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -24,17 +24,15 @@
#include "qemu/cpu-float.h"
#include "hw/registerfields.h"
#include "cpu-qom.h"
+#include "exec/cpu-common.h"
#include "exec/cpu-defs.h"
+#include "exec/cpu-interrupt.h"
#include "exec/gdbstub.h"
#include "exec/page-protection.h"
#include "qapi/qapi-types-common.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
-#ifdef TARGET_AARCH64
-#define KVM_HAVE_MCE_INJECTION 1
-#endif
-
#define EXCP_UDEF 1 /* undefined instruction */
#define EXCP_SWI 2 /* software interrupt */
#define EXCP_PREFETCH_ABORT 3
@@ -62,6 +60,7 @@
#define EXCP_NMI 26
#define EXCP_VINMI 27
#define EXCP_VFNMI 28
+#define EXCP_MON_TRAP 29 /* AArch32 trap to Monitor mode */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
@@ -99,12 +98,6 @@
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
-/* ARM-specific extra insn start words:
- * 1: Conditional execution bits
- * 2: Partial exception syndrome for data aborts
- */
-#define TARGET_INSN_START_EXTRA_WORDS 2
-
/* The 2nd extra word holding syndrome info for data aborts does not use
* the upper 6 bits nor the lower 13 bits. We mask and shift it down to
* help the sleb128 encoder do a better job.
@@ -170,17 +163,12 @@ typedef struct ARMGenericTimer {
* Align the data for use with TCG host vector operations.
*/
-#ifdef TARGET_AARCH64
-# define ARM_MAX_VQ 16
-#else
-# define ARM_MAX_VQ 1
-#endif
+#define ARM_MAX_VQ 16
typedef struct ARMVectorReg {
uint64_t d[2 * ARM_MAX_VQ] QEMU_ALIGNED(16);
} ARMVectorReg;
-#ifdef TARGET_AARCH64
/* In AArch32 mode, predicate registers do not exist at all. */
typedef struct ARMPredicateReg {
uint64_t p[DIV_ROUND_UP(2 * ARM_MAX_VQ, 8)] QEMU_ALIGNED(16);
@@ -190,18 +178,72 @@ typedef struct ARMPredicateReg {
typedef struct ARMPACKey {
uint64_t lo, hi;
} ARMPACKey;
-#endif
/* See the commentary above the TBFLAG field definitions. */
typedef struct CPUARMTBFlags {
uint32_t flags;
- target_ulong flags2;
+ uint64_t flags2;
} CPUARMTBFlags;
typedef struct ARMMMUFaultInfo ARMMMUFaultInfo;
typedef struct NVICState NVICState;
+/*
+ * Enum for indexing vfp.fp_status[].
+ *
+ * FPST_A32: is the "normal" fp status for AArch32 insns
+ * FPST_A64: is the "normal" fp status for AArch64 insns
+ * FPST_A32_F16: used for AArch32 half-precision calculations
+ * FPST_A64_F16: used for AArch64 half-precision calculations
+ * FPST_STD: the ARM "Standard FPSCR Value"
+ * FPST_STD_F16: used for half-precision
+ * calculations with the ARM "Standard FPSCR Value"
+ * FPST_AH: used for the A64 insns which change behaviour
+ * when FPCR.AH == 1 (bfloat16 conversions and multiplies,
+ * and the reciprocal and square root estimate/step insns)
+ * FPST_AH_F16: used for the A64 insns which change behaviour
+ * when FPCR.AH == 1 (bfloat16 conversions and multiplies,
+ * and the reciprocal and square root estimate/step insns);
+ * for half-precision
+ *
+ * Half-precision operations are governed by a separate
+ * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
+ * status structure to control this.
+ *
+ * The "Standard FPSCR", ie default-NaN, flush-to-zero,
+ * round-to-nearest and is used by any operations (generally
+ * Neon) which the architecture defines as controlled by the
+ * standard FPSCR value rather than the FPSCR.
+ *
+ * The "standard FPSCR but for fp16 ops" is needed because
+ * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than
+ * using a fixed value for it.
+ *
+ * FPST_AH is needed because some insns have different
+ * behaviour when FPCR.AH == 1: they don't update cumulative
+ * exception flags, they act like FPCR.{FZ,FIZ} = {1,1} and
+ * they ignore FPCR.RMode. But they don't ignore FPCR.FZ16,
+ * which means we need an FPST_AH_F16 as well.
+ *
+ * To avoid having to transfer exception bits around, we simply
+ * say that the FPSCR cumulative exception flags are the logical
+ * OR of the flags in the four fp statuses. This relies on the
+ * only thing which needs to read the exception flags being
+ * an explicit FPSCR read.
+ */
+typedef enum ARMFPStatusFlavour {
+ FPST_A32,
+ FPST_A64,
+ FPST_A32_F16,
+ FPST_A64_F16,
+ FPST_AH,
+ FPST_AH_F16,
+ FPST_STD,
+ FPST_STD_F16,
+} ARMFPStatusFlavour;
+#define FPST_COUNT 8
+
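(Not part of the patch: the OR-of-flags rule above implies that an FPSCR read must gather the flags from every entry of vfp.fp_status[]. A minimal sketch, assuming softfloat's get_float_exception_flags() and a hypothetical vfp_exceptbits_from_host() mapping helper:)

static inline uint32_t vfp_collect_exception_bits(CPUARMState *env)
{
    int host_flags = 0;

    /* Architecturally the cumulative flags are the OR across all statuses. */
    for (int i = 0; i < FPST_COUNT; i++) {
        host_flags |= get_float_exception_flags(&env->vfp.fp_status[i]);
    }
    return vfp_exceptbits_from_host(host_flags); /* assumed helper */
}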
typedef struct CPUArchState {
/* Regs for current mode. */
uint32_t regs[16];
@@ -606,13 +648,11 @@ typedef struct CPUArchState {
struct {
ARMVectorReg zregs[32];
-#ifdef TARGET_AARCH64
/* Store FFR as pregs[16] to make it easier to treat as any other. */
#define FFR_PRED_NUM 16
ARMPredicateReg pregs[17];
/* Scratch space for aa64 sve predicate temporary. */
ARMPredicateReg preg_tmp;
-#endif
/* We store these fpscr fields separately for convenience. */
uint32_t qc[4] QEMU_ALIGNED(16);
@@ -631,37 +671,8 @@ typedef struct CPUArchState {
/* Scratch space for aa32 neon expansion. */
uint32_t scratch[8];
- /* There are a number of distinct float control structures:
- *
- * fp_status: is the "normal" fp status.
- * fp_status_fp16: used for half-precision calculations
- * standard_fp_status : the ARM "Standard FPSCR Value"
- * standard_fp_status_fp16 : used for half-precision
- * calculations with the ARM "Standard FPSCR Value"
- *
- * Half-precision operations are governed by a separate
- * flush-to-zero control bit in FPSCR:FZ16. We pass a separate
- * status structure to control this.
- *
- * The "Standard FPSCR", ie default-NaN, flush-to-zero,
- * round-to-nearest and is used by any operations (generally
- * Neon) which the architecture defines as controlled by the
- * standard FPSCR value rather than the FPSCR.
- *
- * The "standard FPSCR but for fp16 ops" is needed because
- * the "standard FPSCR" tracks the FPSCR.FZ16 bit rather than
- * using a fixed value for it.
- *
- * To avoid having to transfer exception bits around, we simply
- * say that the FPSCR cumulative exception flags are the logical
- * OR of the flags in the four fp statuses. This relies on the
- * only thing which needs to read the exception flags being
- * an explicit FPSCR read.
- */
- float_status fp_status;
- float_status fp_status_f16;
- float_status standard_fp_status;
- float_status standard_fp_status_f16;
+ /* There are a number of distinct float control structures. */
+ float_status fp_status[FPST_COUNT];
uint64_t zcr_el[4]; /* ZCR_EL[1-3] */
uint64_t smcr_el[4]; /* SMCR_EL[1-3] */
@@ -686,7 +697,6 @@ typedef struct CPUArchState {
uint32_t cregs[16];
} iwmmxt;
-#ifdef TARGET_AARCH64
struct {
ARMPACKey apia;
ARMPACKey apib;
@@ -718,7 +728,6 @@ typedef struct CPUArchState {
* to keep the offsets into the rest of the structure smaller.
*/
ARMVectorReg zarray[ARM_MAX_VQ * 16];
-#endif
struct CPUBreakpoint *cpu_breakpoint[16];
struct CPUWatchpoint *cpu_watchpoint[16];
@@ -774,12 +783,9 @@ typedef struct CPUArchState {
#else /* CONFIG_USER_ONLY */
/* For usermode syscall translation. */
bool eabi;
-#endif /* CONFIG_USER_ONLY */
-
-#ifdef TARGET_TAGGED_ADDRESSES
/* Linux syscall tagged address support */
bool tagged_addr_enable;
-#endif
+#endif /* CONFIG_USER_ONLY */
} CPUARMState;
static inline void set_feature(CPUARMState *env, int feature)
@@ -922,6 +928,8 @@ struct ArchCPU {
/* CPU has memory protection unit */
bool has_mpu;
+ /* CPU has MTE enabled in KVM mode */
+ bool kvm_mte;
/* PMSAv7 MPU number of supported regions */
uint32_t pmsav7_dregion;
/* PMSAv8 MPU number of supported hyp regions */
@@ -944,7 +952,6 @@ struct ArchCPU {
*/
uint32_t kvm_target;
-#ifdef CONFIG_KVM
/* KVM init features for this CPU */
uint32_t kvm_init_features[7];
@@ -957,7 +964,6 @@ struct ArchCPU {
/* KVM steal time */
OnOffAuto kvm_steal_time;
-#endif /* CONFIG_KVM */
/* Uniprocessor system with MP extensions */
bool mp_is_up;
@@ -970,6 +976,9 @@ struct ArchCPU {
/* QOM property to indicate we should use the back-compat CNTFRQ default */
bool backcompat_cntfrq;
+ /* QOM property to indicate we should use the back-compat QARMA5 default */
+ bool backcompat_pauth_default_use_qarma5;
+
/* Specify the number of cores in this CPU cluster. Used for the L2CTLR
* register.
*/
@@ -1060,6 +1069,7 @@ struct ArchCPU {
bool prop_pauth;
bool prop_pauth_impdef;
bool prop_pauth_qarma3;
+ bool prop_pauth_qarma5;
bool prop_lpa2;
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
@@ -1108,8 +1118,9 @@ struct ArchCPU {
typedef struct ARMCPUInfo {
const char *name;
+ const char *deprecation_note;
void (*initfn)(Object *obj);
- void (*class_init)(ObjectClass *oc, void *data);
+ void (*class_init)(ObjectClass *oc, const void *data);
} ARMCPUInfo;
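(Illustration of the new const-qualified class_init signature; all names below are hypothetical:)

static void example_cpu_initfn(Object *obj)
{
    /* per-model feature setup would go here */
}

static void example_cpu_class_init(ObjectClass *oc, const void *data)
{
    /* 'data' now arrives const-qualified; it must not be written through */
}

static const ARMCPUInfo example_cpus[] = {
    {
        .name = "example",
        .initfn = example_cpu_initfn,
        .class_init = example_cpu_class_init,
    },
};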
/**
@@ -1127,16 +1138,14 @@ struct ARMCPUClass {
ResettablePhases parent_phases;
};
-struct AArch64CPUClass {
- ARMCPUClass parent_class;
-};
-
/* Callback functions for the generic timer's timers. */
void arm_gt_ptimer_cb(void *opaque);
void arm_gt_vtimer_cb(void *opaque);
void arm_gt_htimer_cb(void *opaque);
void arm_gt_stimer_cb(void *opaque);
void arm_gt_hvtimer_cb(void *opaque);
+void arm_gt_sel2timer_cb(void *opaque);
+void arm_gt_sel2vtimer_cb(void *opaque);
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu);
void gt_rme_post_el_change(ARMCPU *cpu, void *opaque);
@@ -1200,7 +1209,6 @@ int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
*/
void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);
-#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
@@ -1232,13 +1240,6 @@ static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
#endif
}
-#else
-static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
-static inline void aarch64_sve_change_el(CPUARMState *env, int o,
- int n, bool a)
-{ }
-#endif
-
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
@@ -1365,6 +1366,7 @@ void pmu_init(ARMCPU *cpu);
#define SCTLR_EnIB (1U << 30) /* v8.3, AArch64 only */
#define SCTLR_EnIA (1U << 31) /* v8.3, AArch64 only */
#define SCTLR_DSSBS_32 (1U << 31) /* v8.5, AArch32 only */
+#define SCTLR_CMOW (1ULL << 32) /* FEAT_CMOW */
#define SCTLR_MSCEN (1ULL << 33) /* FEAT_MOPS */
#define SCTLR_BT0 (1ULL << 35) /* v8.5-BTI */
#define SCTLR_BT1 (1ULL << 36) /* v8.5-BTI */
@@ -1702,11 +1704,15 @@ void vfp_set_fpscr(CPUARMState *env, uint32_t val);
*/
/* FPCR bits */
+#define FPCR_FIZ (1 << 0) /* Flush Inputs to Zero (FEAT_AFP) */
+#define FPCR_AH (1 << 1) /* Alternate Handling (FEAT_AFP) */
+#define FPCR_NEP (1 << 2) /* SIMD scalar ops preserve elts (FEAT_AFP) */
#define FPCR_IOE (1 << 8) /* Invalid Operation exception trap enable */
#define FPCR_DZE (1 << 9) /* Divide by Zero exception trap enable */
#define FPCR_OFE (1 << 10) /* Overflow exception trap enable */
#define FPCR_UFE (1 << 11) /* Underflow exception trap enable */
#define FPCR_IXE (1 << 12) /* Inexact exception trap enable */
+#define FPCR_EBF (1 << 13) /* Extended BFloat16 behaviors */
#define FPCR_IDE (1 << 15) /* Input Denormal exception trap enable */
#define FPCR_LEN_MASK (7 << 16) /* LEN, A-profile only */
#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
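(Illustration only: the status-flavour selection implied by the FPST_* comment earlier, keyed off the new FEAT_AFP bit. The function name is made up; the real translator is structured differently:)

static ARMFPStatusFlavour a64_fpst_flavour(uint64_t fpcr, bool is_f16)
{
    /* FPCR.AH selects the alternate-handling statuses; FPCR.FZ16 still
     * applies to half-precision, hence the separate _F16 flavours. */
    if (fpcr & FPCR_AH) {
        return is_f16 ? FPST_AH_F16 : FPST_AH;
    }
    return is_f16 ? FPST_A64_F16 : FPST_A64;
}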
@@ -2557,6 +2563,11 @@ static inline bool arm_is_secure_below_el3(CPUARMState *env)
return false;
}
+static inline bool arm_is_el3_or_mon(CPUARMState *env)
+{
+ return false;
+}
+
static inline ARMSecuritySpace arm_security_space(CPUARMState *env)
{
return ARMSS_NonSecure;
@@ -2589,81 +2600,15 @@ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space);
uint64_t arm_hcr_el2_eff(CPUARMState *env);
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
-/* Return true if the specified exception level is running in AArch64 state. */
-static inline bool arm_el_is_aa64(CPUARMState *env, int el)
-{
- /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
- * and if we're not in EL0 then the state of EL0 isn't well defined.)
- */
- assert(el >= 1 && el <= 3);
- bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);
-
- /* The highest exception level is always at the maximum supported
- * register width, and then lower levels have a register width controlled
- * by bits in the SCR or HCR registers.
- */
- if (el == 3) {
- return aa64;
- }
-
- if (arm_feature(env, ARM_FEATURE_EL3) &&
- ((env->cp15.scr_el3 & SCR_NS) || !(env->cp15.scr_el3 & SCR_EEL2))) {
- aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
- }
-
- if (el == 2) {
- return aa64;
- }
-
- if (arm_is_el2_enabled(env)) {
- aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
- }
-
- return aa64;
-}
-
-/* Function for determining whether guest cp register reads and writes should
+/*
+ * Function for determining whether guest cp register reads and writes should
* access the secure or non-secure bank of a cp register. When EL3 is
* operating in AArch32 state, the NS-bit determines whether the secure
* instance of a cp register should be used. When EL3 is AArch64 (or if
* it doesn't exist at all) then there is no register banking, and all
* accesses are to the non-secure version.
*/
-static inline bool access_secure_reg(CPUARMState *env)
-{
- bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
- !arm_el_is_aa64(env, 3) &&
- !(env->cp15.scr_el3 & SCR_NS));
-
- return ret;
-}
-
-/* Macros for accessing a specified CP register bank */
-#define A32_BANKED_REG_GET(_env, _regname, _secure) \
- ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)
-
-#define A32_BANKED_REG_SET(_env, _regname, _secure, _val) \
- do { \
- if (_secure) { \
- (_env)->cp15._regname##_s = (_val); \
- } else { \
- (_env)->cp15._regname##_ns = (_val); \
- } \
- } while (0)
-
-/* Macros for automatically accessing a specific CP register bank depending on
- * the current secure state of the system. These macros are not intended for
- * supporting instruction translation reads/writes as these are dependent
- * solely on the SCR.NS bit and not the mode.
- */
-#define A32_BANKED_CURRENT_REG_GET(_env, _regname) \
- A32_BANKED_REG_GET((_env), _regname, \
- (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))
-
-#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val) \
- A32_BANKED_REG_SET((_env), _regname, \
- (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
- (_val))
+bool access_secure_reg(CPUARMState *env);
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint32_t cur_el, bool secure);
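(The inline body deleted above presumably just moves out of line; a definition matching the removed logic would be:)

bool access_secure_reg(CPUARMState *env)
{
    return arm_feature(env, ARM_FEATURE_EL3) &&
           !arm_el_is_aa64(env, 3) &&
           !(env->cp15.scr_el3 & SCR_NS);
}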
@@ -2686,39 +2631,6 @@ static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
return env->v7m.exception != 0;
}
-/* Return the current Exception Level (as per ARMv8; note that this differs
- * from the ARMv7 Privilege Level).
- */
-static inline int arm_current_el(CPUARMState *env)
-{
- if (arm_feature(env, ARM_FEATURE_M)) {
- return arm_v7m_is_handler_mode(env) ||
- !(env->v7m.control[env->v7m.secure] & 1);
- }
-
- if (is_a64(env)) {
- return extract32(env->pstate, 2, 2);
- }
-
- switch (env->uncached_cpsr & 0x1f) {
- case ARM_CPU_MODE_USR:
- return 0;
- case ARM_CPU_MODE_HYP:
- return 2;
- case ARM_CPU_MODE_MON:
- return 3;
- default:
- if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
- /* If EL3 is 32-bit then all secure privileged modes run in
- * EL3
- */
- return 3;
- }
-
- return 1;
- }
-}
-
/**
* write_list_to_cpustate
* @cpu: ARMCPU
@@ -2772,14 +2684,19 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* + NonSecure EL1 & 0 stage 2
* + NonSecure EL2
* + NonSecure EL2 & 0 (ARMv8.1-VHE)
- * + Secure EL1 & 0
- * + Secure EL3
+ * + Secure EL1 & 0 stage 1
+ * + Secure EL1 & 0 stage 2 (FEAT_SEL2)
+ * + Secure EL2 (FEAT_SEL2)
+ * + Secure EL2 & 0 (FEAT_SEL2)
+ * + Realm EL1 & 0 stage 1 (FEAT_RME)
+ * + Realm EL1 & 0 stage 2 (FEAT_RME)
+ * + Realm EL2 (FEAT_RME)
+ * + EL3
* If EL3 is 32-bit:
* + NonSecure PL1 & 0 stage 1
* + NonSecure PL1 & 0 stage 2
* + NonSecure PL2
- * + Secure PL0
- * + Secure PL1
+ * + Secure PL1 & 0
* (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
*
* For QEMU, an mmu_idx is not quite the same as a translation regime because:
@@ -2805,29 +2722,34 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync);
* table over and over.
* 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access
* Never (PAN) bit within PSTATE.
- * 7. we fold together the secure and non-secure regimes for A-profile,
+ * 7. we fold together most secure and non-secure regimes for A-profile,
* because there are no banked system registers for aarch64, so the
* process of switching between secure and non-secure is
* already heavyweight.
+ * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure,
+ * because both are in use simultaneously for Secure EL2.
*
* This gives us the following list of cases:
*
- * EL0 EL1&0 stage 1+2 (aka NS PL0)
- * EL1 EL1&0 stage 1+2 (aka NS PL1)
- * EL1 EL1&0 stage 1+2 +PAN
+ * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2)
+ * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN)
* EL0 EL2&0
* EL2 EL2&0
* EL2 EL2&0 +PAN
* EL2 (aka NS PL2)
- * EL3 (aka S PL1)
- * Physical (NS & S)
- * Stage2 (NS & S)
+ * EL3 (aka AArch32 S PL1 PL1&0)
+ * AArch32 S PL0 PL1&0 (we call this EL30_0)
+ * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN)
+ * Stage2 Secure
+ * Stage2 NonSecure
+ * plus one TLB per Physical address space: S, NS, Realm, Root
*
- * for a total of 12 different mmu_idx.
+ * for a total of 16 different mmu_idx.
*
* R profile CPUs have an MPU, but can use the same set of MMU indexes
* as A profile. They only need to distinguish EL0 and EL1 (and
- * EL2 if we ever model a Cortex-R52).
+ * EL2 for cores like the Cortex-R52).
*
* M profile CPUs are rather different as they do not have a true MMU.
* They have the following different MMU indexes:
@@ -2887,6 +2809,8 @@ typedef enum ARMMMUIdx {
ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A,
ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A,
ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A,
+ ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A,
/*
* Used for second stage of an S12 page table walk, or for descriptor
@@ -2894,14 +2818,14 @@ typedef enum ARMMMUIdx {
* are in use simultaneously for SecureEL2: the security state for
* the S2 ptw is selected by the NS bit from the S1 ptw.
*/
- ARMMMUIdx_Stage2_S = 8 | ARM_MMU_IDX_A,
- ARMMMUIdx_Stage2 = 9 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A,
/* TLBs with 1-1 mapping to the physical address spaces. */
- ARMMMUIdx_Phys_S = 10 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_NS = 11 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Root = 12 | ARM_MMU_IDX_A,
- ARMMMUIdx_Phys_Realm = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A,
+ ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A,
/*
* These are not allocated TLBs and are used only for AT system
@@ -2940,6 +2864,8 @@ typedef enum ARMMMUIdxBit {
TO_CORE_BIT(E20_2),
TO_CORE_BIT(E20_2_PAN),
TO_CORE_BIT(E3),
+ TO_CORE_BIT(E30_0),
+ TO_CORE_BIT(E30_3_PAN),
TO_CORE_BIT(Stage2),
TO_CORE_BIT(Stage2_S),
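(Context, not changed by this patch: TO_CORE_BIT derives each TLB-flush bit from the enum value by masking off the index-type tag, roughly:)

#define TO_CORE_BIT(NAME) \
    ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK)

/* so e.g. TO_CORE_BIT(E30_0) yields ARMMMUIdxBit_E30_0 = 1 << 8 */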
@@ -3005,60 +2931,15 @@ static inline bool arm_sctlr_b(CPUARMState *env)
uint64_t arm_sctlr(CPUARMState *env, int el);
-static inline bool arm_cpu_data_is_big_endian_a32(CPUARMState *env,
- bool sctlr_b)
-{
-#ifdef CONFIG_USER_ONLY
- /*
- * In system mode, BE32 is modelled in line with the
- * architecture (as word-invariant big-endianness), where loads
- * and stores are done little endian but from addresses which
- * are adjusted by XORing with the appropriate constant. So the
- * endianness to use for the raw data access is not affected by
- * SCTLR.B.
- * In user mode, however, we model BE32 as byte-invariant
- * big-endianness (because user-only code cannot tell the
- * difference), and so we need to use a data access endianness
- * that depends on SCTLR.B.
- */
- if (sctlr_b) {
- return true;
- }
-#endif
- /* In 32bit endianness is determined by looking at CPSR's E bit */
- return env->uncached_cpsr & CPSR_E;
-}
-
-static inline bool arm_cpu_data_is_big_endian_a64(int el, uint64_t sctlr)
-{
- return sctlr & (el ? SCTLR_EE : SCTLR_E0E);
-}
-
-/* Return true if the processor is in big-endian mode. */
-static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
-{
- if (!is_a64(env)) {
- return arm_cpu_data_is_big_endian_a32(env, arm_sctlr_b(env));
- } else {
- int cur_el = arm_current_el(env);
- uint64_t sctlr = arm_sctlr(env, cur_el);
- return arm_cpu_data_is_big_endian_a64(cur_el, sctlr);
- }
-}
-
-#include "exec/cpu-all.h"
-
/*
* We have more than 32-bits worth of state per TB, so we split the data
* between tb->flags and tb->cs_base, which is otherwise unused for ARM.
* We collect these two parts in CPUARMTBFlags where they are named
* flags and flags2 respectively.
*
- * The flags that are shared between all execution modes, TBFLAG_ANY,
- * are stored in flags. The flags that are specific to a given mode
- * are stores in flags2. Since cs_base is sized on the configured
- * address size, flags2 always has 64-bits for A64, and a minimum of
- * 32-bits for A32 and M32.
+ * The flags that are shared between all execution modes, TBFLAG_ANY, are stored
+ * in flags. The flags that are specific to a given mode are stored in flags2.
+ * flags2 always has 64 bits, even though only 32 bits are used for A32 and M32.
*
* The bits for 32-bit A-profile and M-profile partially overlap:
*
@@ -3168,6 +3049,8 @@ FIELD(TBFLAG_A64, NV2, 34, 1)
FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1)
/* Set if FEAT_NV2 RAM accesses are big-endian */
FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1)
+FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */
+FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */
/*
* Helpers for using the above. Note that only the A64 accessors use
@@ -3229,16 +3112,6 @@ static inline bool bswap_code(bool sctlr_b)
#endif
}
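(Sketch for the two TBFLAG_A64 bits added above, using the DP_TBFLAG_A64 deposit helper this header defines; the function name and call site are hypothetical:)

static CPUARMTBFlags set_afp_tbflags(CPUARMTBFlags flags, uint64_t fpcr)
{
    DP_TBFLAG_A64(flags, AH, (fpcr & FPCR_AH) != 0);
    DP_TBFLAG_A64(flags, NEP, (fpcr & FPCR_NEP) != 0);
    return flags;
}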
-#ifdef CONFIG_USER_ONLY
-static inline bool arm_cpu_bswap_data(CPUARMState *env)
-{
- return TARGET_BIG_ENDIAN ^ arm_cpu_data_is_big_endian(env);
-}
-#endif
-
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *flags);
-
enum {
QEMU_PSCI_CONDUIT_DISABLED = 0,
QEMU_PSCI_CONDUIT_SMC = 1,
@@ -3336,34 +3209,4 @@ extern const uint64_t pred_esz_masks[5];
#define LOG2_TAG_GRANULE 4
#define TAG_GRANULE (1 << LOG2_TAG_GRANULE)
-#ifdef CONFIG_USER_ONLY
-#define TARGET_PAGE_DATA_SIZE (TARGET_PAGE_SIZE >> (LOG2_TAG_GRANULE + 1))
-#endif
-
-#ifdef TARGET_TAGGED_ADDRESSES
-/**
- * cpu_untagged_addr:
- * @cs: CPU context
- * @x: tagged address
- *
- * Remove any address tag from @x. This is explicitly related to the
- * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
- *
- * There should be a better place to put this, but we need this in
- * include/exec/cpu_ldst.h, and not some place linux-user specific.
- */
-static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x)
-{
- CPUARMState *env = cpu_env(cs);
- if (env->tagged_addr_enable) {
- /*
- * TBI is enabled for userspace but not kernelspace addresses.
- * Only clear the tag if bit 55 is clear.
- */
- x &= sextract64(x, 0, 56);
- }
- return x;
-}
-#endif
-
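(Worked example for the tag-stripping arithmetic removed above: sextract64(x, 0, 56) sign-extends bit 55 across the top byte, so the AND clears bits 63:56 only when bit 55 is clear:)

uint64_t x1 = 0x5a007fff12345678ULL;  /* bit 55 clear: tag byte stripped */
x1 &= sextract64(x1, 0, 56);          /* -> 0x00007fff12345678           */

uint64_t x2 = 0x5aff7fff12345678ULL;  /* bit 55 set: address left intact */
x2 &= sextract64(x2, 0, 56);          /* -> 0x5aff7fff12345678           */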
#endif