Diffstat (limited to 'target/arm/helper.c'):
 target/arm/helper.c | 3583 ++++++++++--------------------------
 1 file changed, 995 insertions(+), 2588 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index bb445e3..167f290 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -12,51 +12,85 @@
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
-#include "exec/helper-proto.h"
#include "exec/page-protection.h"
+#include "exec/mmap-lock.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"
#include "exec/cputlb.h"
-#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "hw/irq.h"
#include "system/cpu-timers.h"
+#include "exec/icount.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "qapi/error.h"
#include "qemu/guest-random.h"
#ifdef CONFIG_TCG
+#include "accel/tcg/probe.h"
+#include "accel/tcg/getpc.h"
#include "semihosting/common-semi.h"
#endif
#include "cpregs.h"
#include "target/arm/gtimer.h"
-#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
+#define HELPER_H "tcg/helper.h"
+#include "exec/helper-proto.h.inc"
static void switch_mode(CPUARMState *env, int mode);
-static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
+int compare_u64(const void *a, const void *b)
+{
+ if (*(uint64_t *)a > *(uint64_t *)b) {
+ return 1;
+ }
+ if (*(uint64_t *)a < *(uint64_t *)b) {
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Macros whose expansions are lvalues for the CPUARMState field
+ * described by the ARMCPRegInfo *ri.
+ */
+#define CPREG_FIELD32(env, ri) \
+ (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
+#define CPREG_FIELD64(env, ri) \
+ (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
+
+uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
assert(ri->fieldoffset);
- if (cpreg_field_is_64bit(ri)) {
+ switch (cpreg_field_type(ri)) {
+ case MO_64:
return CPREG_FIELD64(env, ri);
- } else {
+ case MO_32:
return CPREG_FIELD32(env, ri);
+ default:
+ g_assert_not_reached();
}
}
void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
assert(ri->fieldoffset);
- if (cpreg_field_is_64bit(ri)) {
+ switch (cpreg_field_type(ri)) {
+ case MO_64:
CPREG_FIELD64(env, ri) = value;
- } else {
+ break;
+ case MO_32:
CPREG_FIELD32(env, ri) = value;
+ break;
+ default:
+ g_assert_not_reached();
}
}
+#undef CPREG_FIELD32
+#undef CPREG_FIELD64
+
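The CPREG_FIELD macros deserve a second look: they are deliberately written so that the expansion is an lvalue, usable on either side of an assignment, as raw_read() and raw_write() above rely on. A minimal standalone sketch of the offsetof technique (names here are illustrative, not QEMU's):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct state { uint32_t a; uint64_t b; };
struct reginfo { ptrdiff_t fieldoffset; };

/* Adding the recorded offset to the base pointer yields an lvalue. */
#define FIELD64(s, ri) (*(uint64_t *)((char *)(s) + (ri)->fieldoffset))

int main(void)
{
    struct state st = { 0 };
    struct reginfo ri = { .fieldoffset = offsetof(struct state, b) };

    FIELD64(&st, &ri) = 42;     /* assignment through the macro */
    assert(st.b == 42);
    return 0;
}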
static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri)
{
return (char *)env + ri->fieldoffset;
@@ -195,11 +229,11 @@ bool write_list_to_cpustate(ARMCPU *cpu)
return ok;
}
-static void add_cpreg_to_list(gpointer key, gpointer opaque)
+static void add_cpreg_to_list(gpointer key, gpointer value, gpointer opaque)
{
ARMCPU *cpu = opaque;
uint32_t regidx = (uintptr_t)key;
- const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx);
+ const ARMCPRegInfo *ri = value;
if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx);
@@ -208,64 +242,52 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque)
}
}
-static void count_cpreg(gpointer key, gpointer opaque)
+static void count_cpreg(gpointer key, gpointer value, gpointer opaque)
{
ARMCPU *cpu = opaque;
- const ARMCPRegInfo *ri;
-
- ri = g_hash_table_lookup(cpu->cp_regs, key);
+ const ARMCPRegInfo *ri = value;
if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) {
cpu->cpreg_array_len++;
}
}
-static gint cpreg_key_compare(gconstpointer a, gconstpointer b)
-{
- uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a);
- uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b);
-
- if (aidx > bidx) {
- return 1;
- }
- if (aidx < bidx) {
- return -1;
- }
- return 0;
-}
-
void init_cpreg_list(ARMCPU *cpu)
{
/*
 * Initialise the cpreg_indexes[] and cpreg_values[] arrays based on
 * the cp_regs hash.  Note that we require the index array to be
 * sorted by key ID.
*/
- GList *keys;
int arraylen;
- keys = g_hash_table_get_keys(cpu->cp_regs);
- keys = g_list_sort(keys, cpreg_key_compare);
-
cpu->cpreg_array_len = 0;
-
- g_list_foreach(keys, count_cpreg, cpu);
+ g_hash_table_foreach(cpu->cp_regs, count_cpreg, cpu);
arraylen = cpu->cpreg_array_len;
- cpu->cpreg_indexes = g_new(uint64_t, arraylen);
- cpu->cpreg_values = g_new(uint64_t, arraylen);
- cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
- cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
- cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
+ if (arraylen) {
+ cpu->cpreg_indexes = g_new(uint64_t, arraylen);
+ cpu->cpreg_values = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen);
+ cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen);
+ } else {
+ cpu->cpreg_indexes = NULL;
+ cpu->cpreg_values = NULL;
+ cpu->cpreg_vmstate_indexes = NULL;
+ cpu->cpreg_vmstate_values = NULL;
+ }
+ cpu->cpreg_vmstate_array_len = arraylen;
cpu->cpreg_array_len = 0;
- g_list_foreach(keys, add_cpreg_to_list, cpu);
+ g_hash_table_foreach(cpu->cp_regs, add_cpreg_to_list, cpu);
assert(cpu->cpreg_array_len == arraylen);
- g_list_free(keys);
+ if (arraylen) {
+ qsort(cpu->cpreg_indexes, arraylen, sizeof(uint64_t), compare_u64);
+ }
}
-static bool arm_pan_enabled(CPUARMState *env)
+bool arm_pan_enabled(CPUARMState *env)
{
if (is_a64(env)) {
if ((arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1)) {
@@ -314,25 +336,6 @@ static CPAccessResult access_trap_aa32s_el1(CPUARMState *env,
return CP_ACCESS_UNDEFINED;
}
-/*
- * Check for traps to performance monitor registers, which are controlled
- * by MDCR_EL2.TPM for EL2 and MDCR_EL3.TPM for EL3.
- */
-static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- int el = arm_current_el(env);
- uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-
- if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL2;
- }
- if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL3;
- }
- return CP_ACCESS_OK;
-}
-
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
@@ -417,7 +420,9 @@ int alle1_tlbmask(CPUARMState *env)
*/
return (ARMMMUIdxBit_E10_1 |
ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_1_GCS |
ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS |
ARMMMUIdxBit_Stage2 |
ARMMMUIdxBit_Stage2_S);
}
@@ -451,6 +456,8 @@ static const ARMCPRegInfo cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_CONTEXTIDR_EL1,
.nv2_redirect_offset = 0x108 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 1),
.secure = ARM_CP_SECSTATE_NS,
.fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]),
.resetvalue = 0, .writefn = contextidr_write, .raw_writefn = raw_write, },
@@ -668,291 +675,16 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
*/
{ .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
- { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
+ { .name = "CPACR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
.crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access,
.fgt = FGT_CPACR_EL1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 1, 2),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 2),
.nv2_redirect_offset = 0x100 | NV2_REDIR_NV1,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1),
.resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read },
};
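The new .vhe_redir_to_el2/.vhe_redir_to_el01 fields carry full ENCODE_AA64_CP_REG(op0, op1, crn, crm, op2) encodings for FEAT_VHE register redirection. Decoding the CPACR_EL1 entry above as an example:

/*
 * .vhe_redir_to_el2  = ENCODE_AA64_CP_REG(3, 4, 1, 1, 2)  -> CPTR_EL2:
 *     with HCR_EL2.E2H == 1, an EL2 access using the CPACR_EL1
 *     encoding is redirected to the EL2 register.
 * .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 2)  -> CPACR_EL12:
 *     the op1 == 5 alias that still reaches the real EL1 register
 *     from EL2 when E2H is set.
 */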
-typedef struct pm_event {
- uint16_t number; /* PMEVTYPER.evtCount is 16 bits wide */
- /* If the event is supported on this CPU (used to generate PMCEID[01]) */
- bool (*supported)(CPUARMState *);
- /*
- * Retrieve the current count of the underlying event. The programmed
- * counters hold a difference from the return value from this function
- */
- uint64_t (*get_count)(CPUARMState *);
- /*
- * Return how many nanoseconds it will take (at a minimum) for count events
- * to occur. A negative value indicates the counter will never overflow, or
- * that the counter has otherwise arranged for the overflow bit to be set
- * and the PMU interrupt to be raised on overflow.
- */
- int64_t (*ns_per_count)(uint64_t);
-} pm_event;
-
-static bool event_always_supported(CPUARMState *env)
-{
- return true;
-}
-
-static uint64_t swinc_get_count(CPUARMState *env)
-{
- /*
- * SW_INCR events are written directly to the pmevcntr's by writes to
- * PMSWINC, so there is no underlying count maintained by the PMU itself
- */
- return 0;
-}
-
-static int64_t swinc_ns_per(uint64_t ignored)
-{
- return -1;
-}
-
-/*
- * Return the underlying cycle count for the PMU cycle counters.
- * In user-only mode there is no virtual clock, so fall back to the
- * host tick counter instead.
- */
-static uint64_t cycles_get_count(CPUARMState *env)
-{
-#ifndef CONFIG_USER_ONLY
- return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- ARM_CPU_FREQ, NANOSECONDS_PER_SECOND);
-#else
- return cpu_get_host_ticks();
-#endif
-}
-
-#ifndef CONFIG_USER_ONLY
-static int64_t cycles_ns_per(uint64_t cycles)
-{
- return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
-}
-
-static bool instructions_supported(CPUARMState *env)
-{
- /* Precise instruction counting */
- return icount_enabled() == ICOUNT_PRECISE;
-}
-
-static uint64_t instructions_get_count(CPUARMState *env)
-{
- assert(icount_enabled() == ICOUNT_PRECISE);
- return (uint64_t)icount_get_raw();
-}
-
-static int64_t instructions_ns_per(uint64_t icount)
-{
- assert(icount_enabled() == ICOUNT_PRECISE);
- return icount_to_ns((int64_t)icount);
-}
-#endif
-
-static bool pmuv3p1_events_supported(CPUARMState *env)
-{
- /* For events which are supported in any v8.1 PMU */
- return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
-}
-
-static bool pmuv3p4_events_supported(CPUARMState *env)
-{
- /* For events which are supported in any v8.4 PMU */
- return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
-}
-
-static uint64_t zero_event_get_count(CPUARMState *env)
-{
- /* For events which never fire on QEMU, their count is always zero */
- return 0;
-}
-
-static int64_t zero_event_ns_per(uint64_t cycles)
-{
- /* An event which never fires can never overflow */
- return -1;
-}
-
-static const pm_event pm_events[] = {
- { .number = 0x000, /* SW_INCR */
- .supported = event_always_supported,
- .get_count = swinc_get_count,
- .ns_per_count = swinc_ns_per,
- },
-#ifndef CONFIG_USER_ONLY
- { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
- .supported = instructions_supported,
- .get_count = instructions_get_count,
- .ns_per_count = instructions_ns_per,
- },
- { .number = 0x011, /* CPU_CYCLES, Cycle */
- .supported = event_always_supported,
- .get_count = cycles_get_count,
- .ns_per_count = cycles_ns_per,
- },
-#endif
- { .number = 0x023, /* STALL_FRONTEND */
- .supported = pmuv3p1_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
- { .number = 0x024, /* STALL_BACKEND */
- .supported = pmuv3p1_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
- { .number = 0x03c, /* STALL */
- .supported = pmuv3p4_events_supported,
- .get_count = zero_event_get_count,
- .ns_per_count = zero_event_ns_per,
- },
-};
-
-/*
- * Note: Before increasing MAX_EVENT_ID beyond 0x3f into the 0x40xx range of
- * events (i.e. the statistical profiling extension), this implementation
- * should first be updated to something sparse instead of the current
- * supported_event_map[] array.
- */
-#define MAX_EVENT_ID 0x3c
-#define UNSUPPORTED_EVENT UINT16_MAX
-static uint16_t supported_event_map[MAX_EVENT_ID + 1];
-
-/*
- * Called upon CPU initialization to initialize PMCEID[01]_EL0 and build a map
- * of ARM event numbers to indices in our pm_events array.
- *
- * Note: Events in the 0x40XX range are not currently supported.
- */
-void pmu_init(ARMCPU *cpu)
-{
- unsigned int i;
-
- /*
- * Empty supported_event_map and cpu->pmceid[01] before adding supported
- * events to them
- */
- for (i = 0; i < ARRAY_SIZE(supported_event_map); i++) {
- supported_event_map[i] = UNSUPPORTED_EVENT;
- }
- cpu->pmceid0 = 0;
- cpu->pmceid1 = 0;
-
- for (i = 0; i < ARRAY_SIZE(pm_events); i++) {
- const pm_event *cnt = &pm_events[i];
- assert(cnt->number <= MAX_EVENT_ID);
- /* We do not currently support events in the 0x40xx range */
- assert(cnt->number <= 0x3f);
-
- if (cnt->supported(&cpu->env)) {
- supported_event_map[cnt->number] = i;
- uint64_t event_mask = 1ULL << (cnt->number & 0x1f);
- if (cnt->number & 0x20) {
- cpu->pmceid1 |= event_mask;
- } else {
- cpu->pmceid0 |= event_mask;
- }
- }
- }
-}
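pmu_init() splits each supported event number across the two 32-bit PMCEID words on bit 5 of the event number. Two worked cases from the pm_events[] table above:

/*
 * event 0x011 (CPU_CYCLES):     0x11 & 0x20 == 0 -> PMCEID0 bit (0x11 & 0x1f) = 17
 * event 0x023 (STALL_FRONTEND): 0x23 & 0x20 != 0 -> PMCEID1 bit (0x23 & 0x1f) = 3
 */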
-
-/*
- * Check at runtime whether a PMU event is supported for the current machine
- */
-static bool event_supported(uint16_t number)
-{
- if (number > MAX_EVENT_ID) {
- return false;
- }
- return supported_event_map[number] != UNSUPPORTED_EVENT;
-}
-
-static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- /*
- * Performance monitor registers user accessibility is controlled
- * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable
- * trapping to EL2 or EL3 for other accesses.
- */
- int el = arm_current_el(env);
- uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
-
- if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) {
- return CP_ACCESS_TRAP_EL1;
- }
- if (el < 2 && (mdcr_el2 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL2;
- }
- if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) {
- return CP_ACCESS_TRAP_EL3;
- }
-
- return CP_ACCESS_OK;
-}
-
-static CPAccessResult pmreg_access_xevcntr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* ER: event counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 3)) != 0
- && isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_swinc(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* SW: software increment write trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 1)) != 0
- && !isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_selr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* ER: event counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 3)) != 0) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
-static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- /* CR: cycle counter read trap control */
- if (arm_feature(env, ARM_FEATURE_V8)
- && arm_current_el(env) == 0
- && (env->cp15.c9_pmuserenr & (1 << 2)) != 0
- && isread) {
- return CP_ACCESS_OK;
- }
-
- return pmreg_access(env, ri, isread);
-}
-
/*
* Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
* We use these to decide whether we need to wrap a write to MDCR_EL2
@@ -962,684 +694,6 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
(MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
-/*
- * Returns true if the counter (pass 31 for PMCCNTR) should count events using
- * the current EL, security state, and register configuration.
- */
-static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
-{
- uint64_t filter;
- bool e, p, u, nsk, nsu, nsh, m;
- bool enabled, prohibited = false, filtered;
- bool secure = arm_is_secure(env);
- int el = arm_current_el(env);
- uint64_t mdcr_el2;
- uint8_t hpmn;
-
- /*
- * We might be called for M-profile cores where MDCR_EL2 doesn't
- * exist and arm_mdcr_el2_eff() will assert, so this early-exit check
- * must be before we read that value.
- */
- if (!arm_feature(env, ARM_FEATURE_PMU)) {
- return false;
- }
-
- mdcr_el2 = arm_mdcr_el2_eff(env);
- hpmn = mdcr_el2 & MDCR_HPMN;
-
- if (!arm_feature(env, ARM_FEATURE_EL2) ||
- (counter < hpmn || counter == 31)) {
- e = env->cp15.c9_pmcr & PMCRE;
- } else {
- e = mdcr_el2 & MDCR_HPME;
- }
- enabled = e && (env->cp15.c9_pmcnten & (1 << counter));
-
- /* Is event counting prohibited? */
- if (el == 2 && (counter < hpmn || counter == 31)) {
- prohibited = mdcr_el2 & MDCR_HPMD;
- }
- if (secure) {
- prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
- }
-
- if (counter == 31) {
- /*
- * The cycle counter defaults to running. PMCR.DP says "disable
- * the cycle counter when event counting is prohibited".
- * Some MDCR bits disable the cycle counter specifically.
- */
- prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
- if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- if (secure) {
- prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
- }
- if (el == 2) {
- prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
- }
- }
- }
-
- if (counter == 31) {
- filter = env->cp15.pmccfiltr_el0;
- } else {
- filter = env->cp15.c14_pmevtyper[counter];
- }
-
- p = filter & PMXEVTYPER_P;
- u = filter & PMXEVTYPER_U;
- nsk = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSK);
- nsu = arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_NSU);
- nsh = arm_feature(env, ARM_FEATURE_EL2) && (filter & PMXEVTYPER_NSH);
- m = arm_el_is_aa64(env, 1) &&
- arm_feature(env, ARM_FEATURE_EL3) && (filter & PMXEVTYPER_M);
-
- if (el == 0) {
- filtered = secure ? u : u != nsu;
- } else if (el == 1) {
- filtered = secure ? p : p != nsk;
- } else if (el == 2) {
- filtered = !nsh;
- } else { /* EL3 */
- filtered = m != p;
- }
-
- if (counter != 31) {
- /*
- * If not checking PMCCNTR, ensure the counter is setup to an event we
- * support
- */
- uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
- if (!event_supported(event)) {
- return false;
- }
- }
-
- return enabled && !prohibited && !filtered;
-}
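A concrete reading of the EL0 filter case above (this assumes EL3 is implemented, so NSU is honoured):

/*
 * PMEVTYPERn.{U=1, NSU=0}, Non-secure EL0:
 *     filtered = (u != nsu) = true  -> the counter does not count.
 * Setting NSU=1 as well gives u == nsu, so Non-secure EL0 counts again;
 * Secure EL0 looks only at U (filtered = u).
 */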
-
-static void pmu_update_irq(CPUARMState *env)
-{
- ARMCPU *cpu = env_archcpu(env);
- qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
- (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
-}
-
-static bool pmccntr_clockdiv_enabled(CPUARMState *env)
-{
- /*
- * Return true if the clock divider is enabled and the cycle counter
- * is supposed to tick only once every 64 clock cycles. This is
- * controlled by PMCR.D, but if PMCR.LC is set to enable the long
- * (64-bit) cycle counter PMCR.D has no effect.
- */
- return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
-}
-
-static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
-{
- /* Return true if the specified event counter is configured to be 64 bit */
-
- /* This isn't intended to be used with the cycle counter */
- assert(counter < 31);
-
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- return false;
- }
-
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- /*
- * MDCR_EL2.HLP still applies even when EL2 is disabled in the
- * current security state, so we don't use arm_mdcr_el2_eff() here.
- */
- bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
- int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
-
- if (counter >= hpmn) {
- return hlp;
- }
- }
- return env->cp15.c9_pmcr & PMCRLP;
-}
-
-/*
- * Ensure c15_ccnt is the guest-visible count so that operations such as
- * enabling/disabling the counter or filtering, modifying the count itself,
- * etc. can be done logically. This is essentially a no-op if the counter is
- * not enabled at the time of the call.
- */
-static void pmccntr_op_start(CPUARMState *env)
-{
- uint64_t cycles = cycles_get_count(env);
-
- if (pmu_counter_enabled(env, 31)) {
- uint64_t eff_cycles = cycles;
- if (pmccntr_clockdiv_enabled(env)) {
- eff_cycles /= 64;
- }
-
- uint64_t new_pmccntr = eff_cycles - env->cp15.c15_ccnt_delta;
-
- uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ?
- 1ull << 63 : 1ull << 31;
- if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
- env->cp15.c9_pmovsr |= (1ULL << 31);
- pmu_update_irq(env);
- }
-
- env->cp15.c15_ccnt = new_pmccntr;
- }
- env->cp15.c15_ccnt_delta = cycles;
-}
-
-/*
- * If PMCCNTR is enabled, recalculate the delta between the clock and the
- * guest-visible count. A call to pmccntr_op_finish should follow every call to
- * pmccntr_op_start.
- */
-static void pmccntr_op_finish(CPUARMState *env)
-{
- if (pmu_counter_enabled(env, 31)) {
-#ifndef CONFIG_USER_ONLY
- /* Calculate when the counter will next overflow */
- uint64_t remaining_cycles = -env->cp15.c15_ccnt;
- if (!(env->cp15.c9_pmcr & PMCRLC)) {
- remaining_cycles = (uint32_t)remaining_cycles;
- }
- int64_t overflow_in = cycles_ns_per(remaining_cycles);
-
- if (overflow_in > 0) {
- int64_t overflow_at;
-
- if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- overflow_in, &overflow_at)) {
- ARMCPU *cpu = env_archcpu(env);
- timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
- }
- }
-#endif
-
- uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
- if (pmccntr_clockdiv_enabled(env)) {
- prev_cycles /= 64;
- }
- env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
- }
-}
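The op_start/op_finish pair counts by difference rather than by ticking: c15_ccnt_delta always holds the underlying count minus the guest-visible count. A numeric walk-through, assuming the counter is enabled and the clock divider is off:

/*
 * Underlying cycle count reads 1000 while the guest writes PMCCNTR = 5:
 *     pmccntr_op_start()  ->  c15_ccnt_delta = 1000
 *     guest write         ->  c15_ccnt = 5
 *     pmccntr_op_finish() ->  c15_ccnt_delta = 1000 - 5 = 995
 * Next access, underlying count now 1300:
 *     pmccntr_op_start()  ->  c15_ccnt = 1300 - 995 = 305
 * i.e. the written 5 plus the 300 elapsed cycles, with no per-cycle work.
 */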
-
-static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
-{
- uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
- uint64_t count = 0;
- if (event_supported(event)) {
- uint16_t event_idx = supported_event_map[event];
- count = pm_events[event_idx].get_count(env);
- }
-
- if (pmu_counter_enabled(env, counter)) {
- uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
- uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
- 1ULL << 63 : 1ULL << 31;
-
- if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
- env->cp15.c9_pmovsr |= (1 << counter);
- pmu_update_irq(env);
- }
- env->cp15.c14_pmevcntr[counter] = new_pmevcntr;
- }
- env->cp15.c14_pmevcntr_delta[counter] = count;
-}
-
-static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
-{
- if (pmu_counter_enabled(env, counter)) {
-#ifndef CONFIG_USER_ONLY
- uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
- uint16_t event_idx = supported_event_map[event];
- uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
- int64_t overflow_in;
-
- if (!pmevcntr_is_64_bit(env, counter)) {
- delta = (uint32_t)delta;
- }
- overflow_in = pm_events[event_idx].ns_per_count(delta);
-
- if (overflow_in > 0) {
- int64_t overflow_at;
-
- if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
- overflow_in, &overflow_at)) {
- ARMCPU *cpu = env_archcpu(env);
- timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
- }
- }
-#endif
-
- env->cp15.c14_pmevcntr_delta[counter] -=
- env->cp15.c14_pmevcntr[counter];
- }
-}
-
-void pmu_op_start(CPUARMState *env)
-{
- unsigned int i;
- pmccntr_op_start(env);
- for (i = 0; i < pmu_num_counters(env); i++) {
- pmevcntr_op_start(env, i);
- }
-}
-
-void pmu_op_finish(CPUARMState *env)
-{
- unsigned int i;
- pmccntr_op_finish(env);
- for (i = 0; i < pmu_num_counters(env); i++) {
- pmevcntr_op_finish(env, i);
- }
-}
-
-void pmu_pre_el_change(ARMCPU *cpu, void *ignored)
-{
- pmu_op_start(&cpu->env);
-}
-
-void pmu_post_el_change(ARMCPU *cpu, void *ignored)
-{
- pmu_op_finish(&cpu->env);
-}
-
-void arm_pmu_timer_cb(void *opaque)
-{
- ARMCPU *cpu = opaque;
-
- /*
- * Update all the counter values based on the current underlying counts,
- * triggering interrupts to be raised, if necessary. pmu_op_finish() also
- * has the effect of setting the cpu->pmu_timer to the next earliest time a
- * counter may expire.
- */
- pmu_op_start(&cpu->env);
- pmu_op_finish(&cpu->env);
-}
-
-static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
-
- if (value & PMCRC) {
- /* The counter has been reset */
- env->cp15.c15_ccnt = 0;
- }
-
- if (value & PMCRP) {
- unsigned int i;
- for (i = 0; i < pmu_num_counters(env); i++) {
- env->cp15.c14_pmevcntr[i] = 0;
- }
- }
-
- env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
- env->cp15.c9_pmcr |= (value & PMCR_WRITABLE_MASK);
-
- pmu_op_finish(env);
-}
-
-static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint64_t pmcr = env->cp15.c9_pmcr;
-
- /*
- * If EL2 is implemented and enabled for the current security state, reads
- * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
- */
- if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
- pmcr &= ~PMCRN_MASK;
- pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
- }
-
- return pmcr;
-}
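The HPMN substitution in pmcr_read() is how EL2 reserves a block of counters for itself. A worked example with assumed values:

/*
 * 8 counters implemented (PMCR.N == 8), MDCR_EL2.HPMN == 4, EL2 enabled:
 *     reads of PMCR from EL0/EL1 return N == 4 (counters 4-7 are kept
 *     for the hypervisor), while EL2 and EL3 still see N == 8.
 */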
-
-static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- unsigned int i;
- uint64_t overflow_mask, new_pmswinc;
-
- for (i = 0; i < pmu_num_counters(env); i++) {
- /* Increment a counter's count iff: */
- if ((value & (1 << i)) && /* counter's bit is set */
- /* counter is enabled and not filtered */
- pmu_counter_enabled(env, i) &&
- /* counter is SW_INCR */
- (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
- pmevcntr_op_start(env, i);
-
- /*
- * Detect if this write causes an overflow since we can't predict
- * PMSWINC overflows like we can for other events
- */
- new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
-
- overflow_mask = pmevcntr_is_64_bit(env, i) ?
- 1ULL << 63 : 1ULL << 31;
-
- if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
- env->cp15.c9_pmovsr |= (1 << i);
- pmu_update_irq(env);
- }
-
- env->cp15.c14_pmevcntr[i] = new_pmswinc;
-
- pmevcntr_op_finish(env, i);
- }
- }
-}
-
-static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint64_t ret;
- pmccntr_op_start(env);
- ret = env->cp15.c15_ccnt;
- pmccntr_op_finish(env);
- return ret;
-}
-
-static void pmselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /*
- * The value of PMSELR.SEL affects the behavior of PMXEVTYPER and
- * PMXEVCNTR. We allow [0..31] to be written to PMSELR here; in the
- * meanwhile, we check PMSELR.SEL when PMXEVTYPER and PMXEVCNTR are
- * accessed.
- */
- env->cp15.c9_pmselr = value & 0x1f;
-}
-
-static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- env->cp15.c15_ccnt = value;
- pmccntr_op_finish(env);
-}
-
-static void pmccntr_write32(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint64_t cur_val = pmccntr_read(env, NULL);
-
- pmccntr_write(env, ri, deposit64(cur_val, 0, 32, value));
-}
-
-static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- env->cp15.pmccfiltr_el0 = value & PMCCFILTR_EL0;
- pmccntr_op_finish(env);
-}
-
-static void pmccfiltr_write_a32(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmccntr_op_start(env);
- /* M is not accessible from AArch32 */
- env->cp15.pmccfiltr_el0 = (env->cp15.pmccfiltr_el0 & PMCCFILTR_M) |
- (value & PMCCFILTR);
- pmccntr_op_finish(env);
-}
-
-static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- /* M is not visible in AArch32 */
- return env->cp15.pmccfiltr_el0 & PMCCFILTR;
-}
-
-static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmcnten |= value;
- pmu_op_finish(env);
-}
-
-static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmu_op_start(env);
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmcnten &= ~value;
- pmu_op_finish(env);
-}
-
-static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmovsr &= ~value;
- pmu_update_irq(env);
-}
-
-static void pmovsset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pmovsr |= value;
- pmu_update_irq(env);
-}
-
-static void pmevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value, const uint8_t counter)
-{
- if (counter == 31) {
- pmccfiltr_write(env, ri, value);
- } else if (counter < pmu_num_counters(env)) {
- pmevcntr_op_start(env, counter);
-
- /*
- * If this counter's event type is changing, store the current
- * underlying count for the new type in c14_pmevcntr_delta[counter] so
- * pmevcntr_op_finish has the correct baseline when it converts back to
- * a delta.
- */
- uint16_t old_event = env->cp15.c14_pmevtyper[counter] &
- PMXEVTYPER_EVTCOUNT;
- uint16_t new_event = value & PMXEVTYPER_EVTCOUNT;
- if (old_event != new_event) {
- uint64_t count = 0;
- if (event_supported(new_event)) {
- uint16_t event_idx = supported_event_map[new_event];
- count = pm_events[event_idx].get_count(env);
- }
- env->cp15.c14_pmevcntr_delta[counter] = count;
- }
-
- env->cp15.c14_pmevtyper[counter] = value & PMXEVTYPER_MASK;
- pmevcntr_op_finish(env, counter);
- }
- /*
- * Attempts to access PMXEVTYPER are CONSTRAINED UNPREDICTABLE when
- * PMSELR value is equal to or greater than the number of implemented
- * counters, but not equal to 0x1f. We opt to behave as a RAZ/WI.
- */
-}
-
-static uint64_t pmevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri,
- const uint8_t counter)
-{
- if (counter == 31) {
- return env->cp15.pmccfiltr_el0;
- } else if (counter < pmu_num_counters(env)) {
- return env->cp15.c14_pmevtyper[counter];
- } else {
- /*
- * We opt to behave as a RAZ/WI when attempts to access PMXEVTYPER
- * are CONSTRAINED UNPREDICTABLE. See comments in pmevtyper_write().
- */
- return 0;
- }
-}
-
-static void pmevtyper_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- pmevtyper_write(env, ri, value, counter);
-}
-
-static void pmevtyper_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- env->cp15.c14_pmevtyper[counter] = value;
-
- /*
- * pmevtyper_rawwrite is called between a pair of pmu_op_start and
- * pmu_op_finish calls when loading saved state for a migration. Because
- * we're potentially updating the type of event here, the value written to
- * c14_pmevcntr_delta by the preceding pmu_op_start call may be for a
- * different counter type. Therefore, we need to set this value to the
- * current count for the counter type we're writing so that pmu_op_finish
- * has the correct count for its calculation.
- */
- uint16_t event = value & PMXEVTYPER_EVTCOUNT;
- if (event_supported(event)) {
- uint16_t event_idx = supported_event_map[event];
- env->cp15.c14_pmevcntr_delta[counter] =
- pm_events[event_idx].get_count(env);
- }
-}
-
-static uint64_t pmevtyper_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- return pmevtyper_read(env, ri, counter);
-}
-
-static void pmxevtyper_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmevtyper_write(env, ri, value, env->cp15.c9_pmselr & 31);
-}
-
-static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return pmevtyper_read(env, ri, env->cp15.c9_pmselr & 31);
-}
-
-static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value, uint8_t counter)
-{
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
- value &= MAKE_64BIT_MASK(0, 32);
- }
- if (counter < pmu_num_counters(env)) {
- pmevcntr_op_start(env, counter);
- env->cp15.c14_pmevcntr[counter] = value;
- pmevcntr_op_finish(env, counter);
- }
- /*
- * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
- * are CONSTRAINED UNPREDICTABLE.
- */
-}
-
-static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
- uint8_t counter)
-{
- if (counter < pmu_num_counters(env)) {
- uint64_t ret;
- pmevcntr_op_start(env, counter);
- ret = env->cp15.c14_pmevcntr[counter];
- pmevcntr_op_finish(env, counter);
- if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
- /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
- ret &= MAKE_64BIT_MASK(0, 32);
- }
- return ret;
- } else {
- /*
- * We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
- * are CONSTRAINED UNPREDICTABLE.
- */
- return 0;
- }
-}
-
-static void pmevcntr_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- pmevcntr_write(env, ri, value, counter);
-}
-
-static uint64_t pmevcntr_readfn(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- return pmevcntr_read(env, ri, counter);
-}
-
-static void pmevcntr_rawwrite(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- assert(counter < pmu_num_counters(env));
- env->cp15.c14_pmevcntr[counter] = value;
- pmevcntr_write(env, ri, value, counter);
-}
-
-static uint64_t pmevcntr_rawread(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- uint8_t counter = ((ri->crm & 3) << 3) | (ri->opc2 & 7);
- assert(counter < pmu_num_counters(env));
- return env->cp15.c14_pmevcntr[counter];
-}
-
-static void pmxevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- pmevcntr_write(env, ri, value, env->cp15.c9_pmselr & 31);
-}
-
-static uint64_t pmxevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return pmevcntr_read(env, ri, env->cp15.c9_pmselr & 31);
-}
-
-static void pmuserenr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- if (arm_feature(env, ARM_FEATURE_V8)) {
- env->cp15.c9_pmuserenr = value & 0xf;
- } else {
- env->cp15.c9_pmuserenr = value & 1;
- }
-}
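The 0xf and 1 masks in pmuserenr_write() correspond to the PMUSERENR_EL0 bits tested by the pmreg_access_* helpers removed above; pre-v8 CPUs implement only EN:

/*
 * PMUSERENR_EL0 bit assignments behind the masks:
 *     bit 0  EN - EL0 access to the PMU as a whole
 *     bit 1  SW - EL0 writes to PMSWINC          (pmreg_access_swinc)
 *     bit 2  CR - EL0 reads of PMCCNTR           (pmreg_access_ccntr)
 *     bit 3  ER - EL0 reads of PMSELR/PMXEVCNTR  (pmreg_access_selr/_xevcntr)
 */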
-
-static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Only overflow-interrupt bits for implemented counters can be set */
- value &= pmu_counter_mask(env);
- env->cp15.c9_pminten |= value;
- pmu_update_irq(env);
-}
-
-static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- value &= pmu_counter_mask(env);
- env->cp15.c9_pminten &= ~value;
- pmu_update_irq(env);
-}
-
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -1712,6 +766,22 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
if (cpu_isar_feature(aa64_ecv, cpu)) {
valid_mask |= SCR_ECVEN;
}
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ valid_mask |= SCR_GCSEN;
+ }
+ if (cpu_isar_feature(aa64_tcr2, cpu)) {
+ valid_mask |= SCR_TCR2EN;
+ }
+ if (cpu_isar_feature(aa64_sctlr2, cpu)) {
+ valid_mask |= SCR_SCTLR2EN;
+ }
+ if (cpu_isar_feature(aa64_s1pie, cpu) ||
+ cpu_isar_feature(aa64_s2pie, cpu)) {
+ valid_mask |= SCR_PIEN;
+ }
+ if (cpu_isar_feature(aa64_mec, cpu)) {
+ valid_mask |= SCR_MECEN;
+ }
} else {
valid_mask &= ~(SCR_RW | SCR_ST);
if (cpu_isar_feature(aa32_ras, cpu)) {
@@ -1746,12 +816,17 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
*/
if (changed & (SCR_NS | SCR_NSE)) {
tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 |
+ ARMMMUIdxBit_E10_0_GCS |
ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS |
ARMMMUIdxBit_E10_1 |
- ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E10_1_PAN |
+ ARMMMUIdxBit_E10_1_GCS |
+ ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E2));
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E2 |
+ ARMMMUIdxBit_E2_GCS));
}
}
@@ -1804,40 +879,40 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
uint64_t ret = 0;
if (hcr_el2 & HCR_IMO) {
- if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) {
ret |= CPSR_I;
}
- if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) {
ret |= ISR_IS;
ret |= CPSR_I;
}
} else {
- if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
ret |= CPSR_I;
}
- if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
ret |= ISR_IS;
ret |= CPSR_I;
}
}
if (hcr_el2 & HCR_FMO) {
- if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) {
ret |= CPSR_F;
}
- if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) {
ret |= ISR_FS;
ret |= CPSR_F;
}
} else {
- if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_FIQ)) {
ret |= CPSR_F;
}
}
if (hcr_el2 & HCR_AMO) {
- if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
+ if (cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) {
ret |= CPSR_A;
}
}
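The conversions from direct cs->interrupt_request tests to cpu_test_interrupt() centralise the load of the interrupt word. A sketch of what such an accessor looks like; this is an assumption about the helper's shape, not its exact definition:

/* Assumed shape of the accessor (illustrative only): */
static inline bool cpu_test_interrupt_sketch(CPUState *cs, int mask)
{
    /* One atomic load replaces scattered plain reads of the field. */
    return qatomic_read(&cs->interrupt_request) & mask;
}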
@@ -1869,171 +944,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
/* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
{ .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
.access = PL1_W, .type = ARM_CP_NOP },
- /*
- * Performance monitors are implementation defined in v7,
- * but with an ARM recommended set of registers, which we
- * follow.
- *
- * Performance registers fall into three categories:
- * (a) always UNDEF in PL0, RW in PL1 (PMINTENSET, PMINTENCLR)
- * (b) RO in PL0 (ie UNDEF on write), RW in PL1 (PMUSERENR)
- * (c) UNDEF in PL0 if PMUSERENR.EN==0, otherwise accessible (all others)
- * For the cases controlled by PMUSERENR we must set .access to PL0_RW
- * or PL0_RO as appropriate and then check PMUSERENR in the helper fn.
- */
- { .name = "PMCNTENSET", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
- .writefn = pmcntenset_write,
- .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .raw_writefn = raw_write },
- { .name = "PMCNTENSET_EL0", .state = ARM_CP_STATE_AA64, .type = ARM_CP_IO,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 1,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten), .resetvalue = 0,
- .writefn = pmcntenset_write, .raw_writefn = raw_write },
- { .name = "PMCNTENCLR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 2,
- .access = PL0_RW,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcnten),
- .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .writefn = pmcntenclr_write,
- .type = ARM_CP_ALIAS | ARM_CP_IO },
- { .name = "PMCNTENCLR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 2,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCNTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
- .writefn = pmcntenclr_write },
- { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
- .access = PL0_RW, .type = ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
- .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .writefn = pmovsr_write,
- .raw_writefn = raw_write },
- { .name = "PMOVSCLR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsr_write,
- .raw_writefn = raw_write },
- { .name = "PMSWINC", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 4,
- .access = PL0_W, .accessfn = pmreg_access_swinc,
- .fgt = FGT_PMSWINC_EL0,
- .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .writefn = pmswinc_write },
- { .name = "PMSWINC_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 4,
- .access = PL0_W, .accessfn = pmreg_access_swinc,
- .fgt = FGT_PMSWINC_EL0,
- .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .writefn = pmswinc_write },
- { .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
- .access = PL0_RW, .type = ARM_CP_ALIAS,
- .fgt = FGT_PMSELR_EL0,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmselr),
- .accessfn = pmreg_access_selr, .writefn = pmselr_write,
- .raw_writefn = raw_write},
- { .name = "PMSELR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 5,
- .access = PL0_RW, .accessfn = pmreg_access_selr,
- .fgt = FGT_PMSELR_EL0,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
- .writefn = pmselr_write, .raw_writefn = raw_write, },
- { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
- .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fgt = FGT_PMCCNTR_EL0,
- .readfn = pmccntr_read, .writefn = pmccntr_write32,
- .accessfn = pmreg_access_ccntr },
- { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0,
- .access = PL0_RW, .accessfn = pmreg_access_ccntr,
- .fgt = FGT_PMCCNTR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_ccnt),
- .readfn = pmccntr_read, .writefn = pmccntr_write,
- .raw_readfn = raw_read, .raw_writefn = raw_write, },
- { .name = "PMCCFILTR", .cp = 15, .opc1 = 0, .crn = 14, .crm = 15, .opc2 = 7,
- .writefn = pmccfiltr_write_a32, .readfn = pmccfiltr_read_a32,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCCFILTR_EL0,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .resetvalue = 0, },
- { .name = "PMCCFILTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 15, .opc2 = 7,
- .writefn = pmccfiltr_write, .raw_writefn = raw_write,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCCFILTR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.pmccfiltr_el0),
- .resetvalue = 0, },
- { .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
- { .name = "PMXEVTYPER_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 1,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .writefn = pmxevtyper_write, .readfn = pmxevtyper_read },
- { .name = "PMXEVCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 2,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access_xevcntr,
- .fgt = FGT_PMEVCNTRN_EL0,
- .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
- { .name = "PMXEVCNTR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 2,
- .access = PL0_RW, .type = ARM_CP_NO_RAW | ARM_CP_IO,
- .accessfn = pmreg_access_xevcntr,
- .fgt = FGT_PMEVCNTRN_EL0,
- .writefn = pmxevcntr_write, .readfn = pmxevcntr_read },
- { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
- .access = PL0_R | PL1_RW, .accessfn = access_tpm,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
- .resetvalue = 0,
- .writefn = pmuserenr_write, .raw_writefn = raw_write },
- { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 0,
- .access = PL0_R | PL1_RW, .accessfn = access_tpm, .type = ARM_CP_ALIAS,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
- .resetvalue = 0,
- .writefn = pmuserenr_write, .raw_writefn = raw_write },
- { .name = "PMINTENSET", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pminten),
- .resetvalue = 0,
- .writefn = pmintenset_write, .raw_writefn = raw_write },
- { .name = "PMINTENSET_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenset_write, .raw_writefn = raw_write,
- .resetvalue = 0x0 },
- { .name = "PMINTENCLR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write, },
- { .name = "PMINTENCLR_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 2,
- .access = PL1_RW, .accessfn = access_tpm,
- .fgt = FGT_PMINTEN,
- .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_NO_RAW,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
- .writefn = pmintenclr_write },
{ .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
.access = PL1_R,
@@ -2067,12 +977,16 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_AFSR0_EL1,
.nv2_redirect_offset = 0x128 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 0),
.type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_AFSR1_EL1,
.nv2_redirect_offset = 0x130 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 1),
.type = ARM_CP_CONST, .resetvalue = 0 },
/*
* MAIR can just read-as-written because we don't implement caches
@@ -2083,6 +997,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_MAIR_EL1,
.nv2_redirect_offset = 0x140 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 0),
.fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]),
.resetvalue = 0 },
{ .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64,
@@ -2116,25 +1032,6 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
.type = ARM_CP_NO_RAW, .access = PL1_R, .readfn = isr_read },
};
-static const ARMCPRegInfo pmovsset_cp_reginfo[] = {
- /* PMOVSSET is not implemented in v7 before v7ve */
- { .name = "PMOVSSET", .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsset_write,
- .raw_writefn = raw_write },
- { .name = "PMOVSSET_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 14, .opc2 = 3,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMOVS,
- .type = ARM_CP_ALIAS | ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
- .writefn = pmovsset_write,
- .raw_writefn = raw_write },
-};
-
static void teecr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -2211,7 +1108,7 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = {
.resetvalue = 0 },
};
-static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque)
+static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
@@ -3148,9 +2045,11 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
.resetfn = arm_gt_cntfrq_reset,
},
/* overall control: mostly access permissions */
- { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
+ { .name = "CNTKCTL_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
.access = PL1_RW,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 14, 1, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 14, 1, 0),
.fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
.resetvalue = 0,
},
@@ -3443,402 +2342,6 @@ static void par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
}
}
-#ifndef CONFIG_USER_ONLY
-/* get_phys_addr() isn't present for user-mode-only targets */
-
-static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (ri->opc2 & 4) {
- /*
- * The ATS12NSO* operations must trap to EL3 or EL2 if executed in
- * Secure EL1 (which can only happen if EL3 is AArch64).
- * They are simply UNDEF if executed from NS EL1.
- * They function normally from EL2 or EL3.
- */
- if (arm_current_el(env) == 1) {
- if (arm_is_secure_below_el3(env)) {
- if (env->cp15.scr_el3 & SCR_EEL2) {
- return CP_ACCESS_TRAP_EL2;
- }
- return CP_ACCESS_TRAP_EL3;
- }
- return CP_ACCESS_UNDEFINED;
- }
- }
- return CP_ACCESS_OK;
-}
-
-#ifdef CONFIG_TCG
-static int par_el1_shareability(GetPhysAddrResult *res)
-{
- /*
- * The PAR_EL1.SH field must be 0b10 for Device or Normal-NC
- * memory -- see pseudocode PAREncodeShareability().
- */
- if (((res->cacheattrs.attrs & 0xf0) == 0) ||
- res->cacheattrs.attrs == 0x44 || res->cacheattrs.attrs == 0x40) {
- return 2;
- }
- return res->cacheattrs.shareability;
-}
-
-static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- ARMSecuritySpace ss)
-{
- bool ret;
- uint64_t par64;
- bool format64 = false;
- ARMMMUFaultInfo fi = {};
- GetPhysAddrResult res = {};
-
- /*
- * I_MXTJT: Granule protection checks are not performed on the final
- * address of a successful translation. This is a translation not a
- * memory reference, so "memop = none = 0".
- */
- ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0,
- mmu_idx, ss, &res, &fi);
-
- /*
- * ATS operations only do S1 or S1+S2 translations, so we never
- * have to deal with the ARMCacheAttrs format for S2 only.
- */
- assert(!res.cacheattrs.is_s2_format);
-
- if (ret) {
- /*
- * Some kinds of translation fault must cause exceptions rather
- * than being reported in the PAR.
- */
- int current_el = arm_current_el(env);
- int target_el;
- uint32_t syn, fsr, fsc;
- bool take_exc = false;
-
- if (fi.s1ptw && current_el == 1
- && arm_mmu_idx_is_stage1_of_2(mmu_idx)) {
- /*
- * Synchronous stage 2 fault on an access made as part of the
- * translation table walk for AT S1E0* or AT S1E1* insn
- * executed from NS EL1. If this is a synchronous external abort
- * and SCR_EL3.EA == 1, then we take a synchronous external abort
- * to EL3. Otherwise the fault is taken as an exception to EL2,
- * and HPFAR_EL2 holds the faulting IPA.
- */
- if (fi.type == ARMFault_SyncExternalOnWalk &&
- (env->cp15.scr_el3 & SCR_EA)) {
- target_el = 3;
- } else {
- env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
- if (arm_is_secure_below_el3(env) && fi.s1ns) {
- env->cp15.hpfar_el2 |= HPFAR_NS;
- }
- target_el = 2;
- }
- take_exc = true;
- } else if (fi.type == ARMFault_SyncExternalOnWalk) {
- /*
- * Synchronous external aborts during a translation table walk
- * are taken as Data Abort exceptions.
- */
- if (fi.stage2) {
- if (current_el == 3) {
- target_el = 3;
- } else {
- target_el = 2;
- }
- } else {
- target_el = exception_target_el(env);
- }
- take_exc = true;
- }
-
- if (take_exc) {
- /* Construct FSR and FSC using same logic as arm_deliver_fault() */
- if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
- arm_s1_regime_using_lpae_format(env, mmu_idx)) {
- fsr = arm_fi_to_lfsc(&fi);
- fsc = extract32(fsr, 0, 6);
- } else {
- fsr = arm_fi_to_sfsc(&fi);
- fsc = 0x3f;
- }
- /*
- * Report exception with ESR indicating a fault due to a
- * translation table walk for a cache maintenance instruction.
- */
- syn = syn_data_abort_no_iss(current_el == target_el, 0,
- fi.ea, 1, fi.s1ptw, 1, fsc);
- env->exception.vaddress = value;
- env->exception.fsr = fsr;
- raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
- }
- }
-
- if (is_a64(env)) {
- format64 = true;
- } else if (arm_feature(env, ARM_FEATURE_LPAE)) {
- /*
- * ATS1Cxx:
- * * TTBCR.EAE determines whether the result is returned using the
- * 32-bit or the 64-bit PAR format
- * * Instructions executed in Hyp mode always use the 64bit format
- *
- * ATS1S2NSOxx uses the 64bit format if any of the following is true:
- * * The Non-secure TTBCR.EAE bit is set to 1
- * * The implementation includes EL2, and the value of HCR.VM is 1
- *
- * (Note that HCR.DC makes HCR.VM behave as if it is 1.)
- *
- * ATS1Hx always uses the 64bit format.
- */
- format64 = arm_s1_regime_using_lpae_format(env, mmu_idx);
-
- if (arm_feature(env, ARM_FEATURE_EL2)) {
- if (mmu_idx == ARMMMUIdx_E10_0 ||
- mmu_idx == ARMMMUIdx_E10_1 ||
- mmu_idx == ARMMMUIdx_E10_1_PAN) {
- format64 |= env->cp15.hcr_el2 & (HCR_VM | HCR_DC);
- } else {
- format64 |= arm_current_el(env) == 2;
- }
- }
- }
-
- if (format64) {
- /* Create a 64-bit PAR */
- par64 = (1 << 11); /* LPAE bit always set */
- if (!ret) {
- par64 |= res.f.phys_addr & ~0xfffULL;
- if (!res.f.attrs.secure) {
- par64 |= (1 << 9); /* NS */
- }
- par64 |= (uint64_t)res.cacheattrs.attrs << 56; /* ATTR */
- par64 |= par_el1_shareability(&res) << 7; /* SH */
- } else {
- uint32_t fsr = arm_fi_to_lfsc(&fi);
-
- par64 |= 1; /* F */
- par64 |= (fsr & 0x3f) << 1; /* FS */
- if (fi.stage2) {
- par64 |= (1 << 9); /* S */
- }
- if (fi.s1ptw) {
- par64 |= (1 << 8); /* PTW */
- }
- }
- } else {
- /*
- * fsr is a DFSR/IFSR value for the short descriptor
- * translation table format (with WnR always clear).
- * Convert it to a 32-bit PAR.
- */
- if (!ret) {
- /* We do not set any attribute bits in the PAR */
- if (res.f.lg_page_size == 24
- && arm_feature(env, ARM_FEATURE_V7)) {
- par64 = (res.f.phys_addr & 0xff000000) | (1 << 1);
- } else {
- par64 = res.f.phys_addr & 0xfffff000;
- }
- if (!res.f.attrs.secure) {
- par64 |= (1 << 9); /* NS */
- }
- } else {
- uint32_t fsr = arm_fi_to_sfsc(&fi);
-
- par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
- ((fsr & 0xf) << 1) | 1;
- }
- }
- return par64;
-}
-#endif /* CONFIG_TCG */
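For reference while reading do_ats_write(): the 64-bit PAR it assembles has this shape (the PA width shown assumes a 48-bit output address):

/*
 * PAR_EL1, F == 0 (translation succeeded):
 *     [63:56] ATTR  memory attributes from the walk
 *     [47:12] PA    output address (phys_addr & ~0xfff)
 *     [11]    LPAE  always 1 in the long format
 *     [9]     NS
 *     [8:7]   SH    shareability, forced to 0b10 for Device/Normal-NC
 *     [0]     F     fault flag
 * On failure: F == 1, FST in [6:1], S (stage 2) in [9], PTW in [8].
 */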
-
-static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- uint64_t par64;
- ARMMMUIdx mmu_idx;
- int el = arm_current_el(env);
- ARMSecuritySpace ss = arm_security_space(env);
-
- switch (ri->opc2 & 6) {
- case 0:
- /* stage 1 current state PL1: ATS1CPR, ATS1CPW, ATS1CPRP, ATS1CPWP */
- switch (el) {
- case 3:
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = ARMMMUIdx_E30_3_PAN;
- } else {
- mmu_idx = ARMMMUIdx_E3;
- }
- break;
- case 2:
- g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
- /* fall through */
- case 1:
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = ARMMMUIdx_Stage1_E1_PAN;
- } else {
- mmu_idx = ARMMMUIdx_Stage1_E1;
- }
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 2:
- /* stage 1 current state PL0: ATS1CUR, ATS1CUW */
- switch (el) {
- case 3:
- mmu_idx = ARMMMUIdx_E30_0;
- break;
- case 2:
- g_assert(ss != ARMSS_Secure); /* ARMv8.4-SecEL2 is 64-bit only */
- mmu_idx = ARMMMUIdx_Stage1_E0;
- break;
- case 1:
- mmu_idx = ARMMMUIdx_Stage1_E0;
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 4:
- /* stage 1+2 NonSecure PL1: ATS12NSOPR, ATS12NSOPW */
- mmu_idx = ARMMMUIdx_E10_1;
- ss = ARMSS_NonSecure;
- break;
- case 6:
- /* stage 1+2 NonSecure PL0: ATS12NSOUR, ATS12NSOUW */
- mmu_idx = ARMMMUIdx_E10_0;
- ss = ARMSS_NonSecure;
- break;
- default:
- g_assert_not_reached();
- }
-
- par64 = do_ats_write(env, value, access_type, mmu_idx, ss);
-
- A32_BANKED_CURRENT_REG_SET(env, par, par64);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-
-static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- uint64_t par64;
-
- /* There is no SecureEL2 for AArch32. */
- par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2,
- ARMSS_NonSecure);
-
- A32_BANKED_CURRENT_REG_SET(env, par, par64);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-
-static CPAccessResult at_e012_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- /*
- * R_NYXTL: instruction is UNDEFINED if it applies to an Exception level
- * lower than EL3 and the combination SCR_EL3.{NSE,NS} is reserved. This can
- * only happen when executing at EL3 because that combination also causes an
- * illegal exception return. We don't need to check FEAT_RME either, because
- * scr_write() ensures that the NSE bit is not set otherwise.
- */
- if ((env->cp15.scr_el3 & (SCR_NSE | SCR_NS)) == SCR_NSE) {
- return CP_ACCESS_UNDEFINED;
- }
- return CP_ACCESS_OK;
-}
-
-static CPAccessResult at_s1e2_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 3 &&
- !(env->cp15.scr_el3 & (SCR_NS | SCR_EEL2))) {
- return CP_ACCESS_UNDEFINED;
- }
- return at_e012_access(env, ri, isread);
-}
-
-static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_AT)) {
- return CP_ACCESS_TRAP_EL2;
- }
- return at_e012_access(env, ri, isread);
-}
-
-static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
-#ifdef CONFIG_TCG
- MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
- ARMMMUIdx mmu_idx;
- uint64_t hcr_el2 = arm_hcr_el2_eff(env);
- bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE);
- bool for_el3 = false;
- ARMSecuritySpace ss;
-
- switch (ri->opc2 & 6) {
- case 0:
- switch (ri->opc1) {
- case 0: /* AT S1E1R, AT S1E1W, AT S1E1RP, AT S1E1WP */
- if (ri->crm == 9 && arm_pan_enabled(env)) {
- mmu_idx = regime_e20 ?
- ARMMMUIdx_E20_2_PAN : ARMMMUIdx_Stage1_E1_PAN;
- } else {
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1;
- }
- break;
- case 4: /* AT S1E2R, AT S1E2W */
- mmu_idx = hcr_el2 & HCR_E2H ? ARMMMUIdx_E20_2 : ARMMMUIdx_E2;
- break;
- case 6: /* AT S1E3R, AT S1E3W */
- mmu_idx = ARMMMUIdx_E3;
- for_el3 = true;
- break;
- default:
- g_assert_not_reached();
- }
- break;
- case 2: /* AT S1E0R, AT S1E0W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_Stage1_E0;
- break;
- case 4: /* AT S12E1R, AT S12E1W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_E10_1;
- break;
- case 6: /* AT S12E0R, AT S12E0W */
- mmu_idx = regime_e20 ? ARMMMUIdx_E20_0 : ARMMMUIdx_E10_0;
- break;
- default:
- g_assert_not_reached();
- }
-
- ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env);
- env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss);
-#else
- /* Handled by hardware accelerator. */
- g_assert_not_reached();
-#endif /* CONFIG_TCG */
-}
-#endif
-
/* Return basic MPU access permission bits. */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
@@ -4276,7 +2779,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* If the ASID changes (with a 64-bit write), we must flush the TLB. */
- if (cpreg_field_is_64bit(ri) &&
+ if (cpreg_field_type(ri) == MO_64 &&
extract64(raw_read(env, ri) ^ value, 48, 16) != 0) {
ARMCPU *cpu = env_archcpu(env);
tlb_flush(CPU(cpu));
@@ -4297,7 +2800,9 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
(arm_hcr_el2_eff(env) & HCR_E2H)) {
uint16_t mask = ARMMMUIdxBit_E20_2 |
ARMMMUIdxBit_E20_2_PAN |
- ARMMMUIdxBit_E20_0;
+ ARMMMUIdxBit_E20_2_GCS |
+ ARMMMUIdxBit_E20_0 |
+ ARMMMUIdxBit_E20_0_GCS;
tlb_flush_by_mmuidx(env_cpu(env), mask);
}
raw_write(env, ri, value);
@@ -4337,6 +2842,8 @@ static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_FAR_EL1,
.nv2_redirect_offset = 0x220 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 6, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 6, 0, 0),
.fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
.resetvalue = 0, },
};
@@ -4347,12 +2854,16 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_ESR_EL1,
.nv2_redirect_offset = 0x138 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 2, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 2, 0),
.fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, },
{ .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_TTBR0_EL1,
.nv2_redirect_offset = 0x200 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 0),
.writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s),
offsetof(CPUARMState, cp15.ttbr0_ns) } },
@@ -4361,6 +2872,8 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_TTBR1_EL1,
.nv2_redirect_offset = 0x210 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 1),
.writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write,
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s),
offsetof(CPUARMState, cp15.ttbr1_ns) } },
@@ -4369,6 +2882,8 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_TCR_EL1,
.nv2_redirect_offset = 0x120 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 2),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 2),
.writefn = vmsa_tcr_el12_write,
.raw_writefn = raw_write,
.resetvalue = 0,
@@ -4413,8 +2928,12 @@ static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri,
static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+#ifdef CONFIG_USER_ONLY
+ g_assert_not_reached();
+#else
/* Wait-for-interrupt (deprecated) */
cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT);
+#endif
}
static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4468,39 +2987,6 @@ static const ARMCPRegInfo omap_cp_reginfo[] = {
.type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 },
};
-static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- env->cp15.c15_cpar = value & 0x3fff;
-}
-
-static const ARMCPRegInfo xscale_cp_reginfo[] = {
- { .name = "XSCALE_CPAR",
- .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0,
- .writefn = xscale_cpar_write, },
- { .name = "XSCALE_AUXCR",
- .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr),
- .resetvalue = 0, },
- /*
- * XScale specific cache-lockdown: since we have no cache we NOP these
- * and hope the guest does not really rely on cache behaviour.
- */
- { .name = "XSCALE_LOCK_ICACHE_LINE",
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NOP },
- { .name = "XSCALE_UNLOCK_ICACHE",
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NOP },
- { .name = "XSCALE_DCACHE_LOCK",
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_NOP },
- { .name = "XSCALE_UNLOCK_DCACHE",
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NOP },
-};
-
static const ARMCPRegInfo dummy_c15_cp_reginfo[] = {
/*
* RAZ/WI the whole crn=15 space, when we don't have a more specific
@@ -4603,12 +3089,14 @@ static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
static const ARMCPRegInfo lpae_cp_reginfo[] = {
- /* NOP AMAIR0/1 */
- { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
+ /* AMAIR0 is mapped to AMAIR_EL1[31:0] */
+ { .name = "AMAIR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_AMAIR_EL1,
.nv2_redirect_offset = 0x148 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 3, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 3, 0),
.type = ARM_CP_CONST, .resetvalue = 0 },
/* AMAIR1 is mapped to AMAIR_EL1[63:32] */
{ .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
@@ -4891,16 +3379,6 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* This may enable/disable the MMU, so do a TLB flush. */
tlb_flush(CPU(cpu));
-
- if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) {
- /*
- * Normally we would always end the TB on an SCTLR write; see the
- * comment in ARMCPRegInfo sctlr initialization below for why Xscale
- * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild
- * of hflags from the translator, so do it here.
- */
- arm_rebuild_hflags(env);
- }
}
static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4948,15 +3426,71 @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
}
}
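+/*
+ * FEAT_NV: an EL1 access governed by NV1 traps to EL2 only for the
+ * combination HCR_EL2.{NV,NV1} == {1,1}.
+ */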
+static CPAccessResult access_nv1_with_nvx(uint64_t hcr_nv)
+{
+ return hcr_nv == (HCR_NV | HCR_NV1) ? CP_ACCESS_TRAP_EL2 : CP_ACCESS_OK;
+}
+
static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (arm_current_el(env) == 1) {
- uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2);
+ return access_nv1_with_nvx(arm_hcr_el2_nvx_eff(env));
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_nv1_or_exlock_el1(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) == 1) {
+ uint64_t nvx = arm_hcr_el2_nvx_eff(env);
- if (hcr_nv == (HCR_NV | HCR_NV1)) {
- return CP_ACCESS_TRAP_EL2;
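+        /*
+         * FEAT_GCS: while PSTATE.EXLOCK is set and GCSCR_EL1.EXLOCKEN
+         * is enabled, writes fail the EXLOCK check; when HCR_EL2.NV1
+         * is in force, the NV1 handling below applies instead.
+         */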
+ if (!isread &&
+ (env->pstate & PSTATE_EXLOCK) &&
+ (env->cp15.gcscr_el[1] & GCSCR_EXLOCKEN) &&
+ !(nvx & HCR_NV1)) {
+ return CP_ACCESS_EXLOCK;
}
+ return access_nv1_with_nvx(nvx);
+ }
+
+ /*
+ * At EL2, since VHE redirection is done at translation time,
+ * el_is_in_host is always false here, so EXLOCK does not apply.
+ */
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_exlock_el2(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ int el = arm_current_el(env);
+
+ if (el == 3) {
+ return CP_ACCESS_OK;
+ }
+
+ /*
+ * Access to the EL2 register from EL1 means NV is set, and
+ * EXLOCK has priority over an NV1 trap to EL2.
+ */
+ if (!isread &&
+ (env->pstate & PSTATE_EXLOCK) &&
+ (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN)) {
+ return CP_ACCESS_EXLOCK;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_exlock_el3(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if (!isread &&
+ (env->pstate & PSTATE_EXLOCK) &&
+ (env->cp15.gcscr_el[3] & GCSCR_EXLOCKEN)) {
+ return CP_ACCESS_EXLOCK;
}
return CP_ACCESS_OK;
}
@@ -4985,7 +3519,7 @@ static void ic_ivau_write(CPUARMState *env, const ARMCPRegInfo *ri,
mmap_lock();
- tb_invalidate_phys_range(start_address, end_address);
+ tb_invalidate_phys_range(env_cpu(env), start_address, end_address);
mmap_unlock();
}
@@ -5089,53 +3623,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 14, .opc2 = 2,
.fgt = FGT_DCCISW,
.access = PL1_W, .accessfn = access_tsw, .type = ARM_CP_NOP },
-#ifndef CONFIG_USER_ONLY
- /* 64 bit address translation operations */
- { .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1R,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1W,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E0R,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E0W,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- { .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
- .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .accessfn = at_e012_access, .writefn = ats_write64 },
- /* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
- { .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write64 },
- { .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write64 },
{ .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
@@ -5143,7 +3630,6 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
.fgt = FGT_PAR_EL1,
.fieldoffset = offsetof(CPUARMState, cp15.par_el[1]),
.writefn = par_write },
-#endif
/* 32 bit cache operations */
{ .name = "ICIALLUIS", .cp = 15, .opc1 = 0, .crn = 7, .crm = 1, .opc2 = 0,
.type = ARM_CP_NOP, .access = PL1_W, .accessfn = access_ticab },
@@ -5180,14 +3666,18 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
{ .name = "ELR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1,
- .access = PL1_RW, .accessfn = access_nv1,
+ .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
.nv2_redirect_offset = 0x230 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 1),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 1),
.fieldoffset = offsetof(CPUARMState, elr_el[1]) },
{ .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL1_RW, .accessfn = access_nv1,
+ .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1,
.nv2_redirect_offset = 0x160 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 0),
.fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) },
/*
* We rely on the access checks not allowing the guest to write to the
@@ -5327,7 +3817,8 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
value &= valid_mask;
/* RW is RAO/WI if EL1 is AArch64 only */
- if (!cpu_isar_feature(aa64_aa32_el1, cpu)) {
+ if (arm_feature(env, ARM_FEATURE_AARCH64) &&
+ !cpu_isar_feature(aa64_aa32_el1, cpu)) {
value |= HCR_RW;
}
@@ -5472,6 +3963,16 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env));
}
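+/*
+ * Return the effective HCR_EL2.{NV2,NV1,NV} bits: if NV is clear,
+ * NV1 and NV2 have no effect and are reported as zero.
+ */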
+uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env)
+{
+ uint64_t hcr = arm_hcr_el2_eff(env);
+
+ if (!(hcr & HCR_NV)) {
+ return 0; /* CONSTRAINED UNPREDICTABLE wrt NV1 */
+ }
+ return hcr & (HCR_NV2 | HCR_NV1 | HCR_NV);
+}
+
/*
* Corresponds to ARM pseudocode function ELIsInHost().
*/
@@ -5506,23 +4007,27 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
ARMCPU *cpu = env_archcpu(env);
uint64_t valid_mask = 0;
- /* FEAT_MOPS adds MSCEn and MCE2 */
if (cpu_isar_feature(aa64_mops, cpu)) {
valid_mask |= HCRX_MSCEN | HCRX_MCE2;
}
-
- /* FEAT_NMI adds TALLINT, VINMI and VFNMI */
if (cpu_isar_feature(aa64_nmi, cpu)) {
valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI;
}
- /* FEAT_CMOW adds CMOW */
if (cpu_isar_feature(aa64_cmow, cpu)) {
valid_mask |= HCRX_CMOW;
}
- /* FEAT_XS adds FGTnXS, FnXS */
if (cpu_isar_feature(aa64_xs, cpu)) {
valid_mask |= HCRX_FGTNXS | HCRX_FNXS;
}
+ if (cpu_isar_feature(aa64_tcr2, cpu)) {
+ valid_mask |= HCRX_TCR2EN;
+ }
+ if (cpu_isar_feature(aa64_sctlr2, cpu)) {
+ valid_mask |= HCRX_SCTLR2EN;
+ }
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ valid_mask |= HCRX_GCSEN;
+ }
/* Clear RES0 bits. */
env->cp15.hcrx_el2 = value & valid_mask;
@@ -5580,11 +4085,22 @@ uint64_t arm_hcrx_el2_eff(CPUARMState *env)
* This may need to be revisited for future bits.
*/
if (!arm_is_el2_enabled(env)) {
+ ARMCPU *cpu = env_archcpu(env);
uint64_t hcrx = 0;
- if (cpu_isar_feature(aa64_mops, env_archcpu(env))) {
- /* MSCEn behaves as 1 if EL2 is not enabled */
+
+    /* Bits whose effective value is 1 if EL2 is not enabled. */
+ if (cpu_isar_feature(aa64_mops, cpu)) {
hcrx |= HCRX_MSCEN;
}
+ if (cpu_isar_feature(aa64_tcr2, cpu)) {
+ hcrx |= HCRX_TCR2EN;
+ }
+ if (cpu_isar_feature(aa64_sctlr2, cpu)) {
+ hcrx |= HCRX_SCTLR2EN;
+ }
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ hcrx |= HCRX_GCSEN;
+ }
return hcrx;
}
if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) {
@@ -5642,7 +4158,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "ELR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1,
- .access = PL2_RW,
+ .access = PL2_RW, .accessfn = access_exlock_el2,
.fieldoffset = offsetof(CPUARMState, elr_el[2]) },
{ .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH,
.type = ARM_CP_NV2_REDIRECT,
@@ -5660,7 +4176,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL2_RW,
+ .access = PL2_RW, .accessfn = access_exlock_el2,
.fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) },
{ .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0,
@@ -5746,33 +4262,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.access = PL2_RW, .type = ARM_CP_64BIT | ARM_CP_ALIAS,
.fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el[2]) },
#ifndef CONFIG_USER_ONLY
- /*
- * Unlike the other EL2-related AT operations, these must
- * UNDEF from EL3 if EL2 is not implemented, which is why we
- * define them here rather than with the rest of the AT ops.
- */
- { .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = ats_write64 },
- { .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC | ARM_CP_EL3_NO_EL2_UNDEF,
- .writefn = ats_write64 },
- /*
- * The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
- * if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
- * with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
- * to behave as if SCR.NS was 1.
- */
- { .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL2_W,
- .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
- { .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL2_W,
- .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
{ .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
/*
@@ -5969,7 +4458,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "ELR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1,
- .access = PL3_RW,
+ .access = PL3_RW, .accessfn = access_exlock_el3,
.fieldoffset = offsetof(CPUARMState, elr_el[3]) },
{ .name = "ESR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0,
@@ -5980,7 +4469,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
{ .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0,
- .access = PL3_RW,
+ .access = PL3_RW, .accessfn = access_exlock_el3,
.fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) },
{ .name = "VBAR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0,
@@ -6048,235 +4537,6 @@ static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri,
return e2h_access(env, ri, isread);
}
-/* Test if system register redirection is to occur in the current state. */
-static bool redirect_for_e2h(CPUARMState *env)
-{
- return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H);
-}
-
-static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- CPReadFn *readfn;
-
- if (redirect_for_e2h(env)) {
- /* Switch to the saved EL2 version of the register. */
- ri = ri->opaque;
- readfn = ri->readfn;
- } else {
- readfn = ri->orig_readfn;
- }
- if (readfn == NULL) {
- readfn = raw_read;
- }
- return readfn(env, ri);
-}
-
-static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- CPWriteFn *writefn;
-
- if (redirect_for_e2h(env)) {
- /* Switch to the saved EL2 version of the register. */
- ri = ri->opaque;
- writefn = ri->writefn;
- } else {
- writefn = ri->orig_writefn;
- }
- if (writefn == NULL) {
- writefn = raw_write;
- }
- writefn(env, ri, value);
-}
-
-static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
- return ri->orig_readfn(env, ri->opaque);
-}
-
-static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Pass the EL1 register accessor its ri, not the EL12 alias ri */
- return ri->orig_writefn(env, ri->opaque, value);
-}
-
-static CPAccessResult el2_e2h_e12_access(CPUARMState *env,
- const ARMCPRegInfo *ri,
- bool isread)
-{
- if (arm_current_el(env) == 1) {
- /*
- * This must be a FEAT_NV access (will either trap or redirect
- * to memory). None of the registers with _EL12 aliases want to
- * apply their trap controls for this kind of access, so don't
- * call the orig_accessfn or do the "UNDEF when E2H is 0" check.
- */
- return CP_ACCESS_OK;
- }
- /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */
- if (!(arm_hcr_el2_eff(env) & HCR_E2H)) {
- return CP_ACCESS_UNDEFINED;
- }
- if (ri->orig_accessfn) {
- return ri->orig_accessfn(env, ri->opaque, isread);
- }
- return CP_ACCESS_OK;
-}
-
-static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu)
-{
- struct E2HAlias {
- uint32_t src_key, dst_key, new_key;
- const char *src_name, *dst_name, *new_name;
- bool (*feature)(const ARMISARegisters *id);
- };
-
-#define K(op0, op1, crn, crm, op2) \
- ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
-
- static const struct E2HAlias aliases[] = {
- { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0),
- "SCTLR", "SCTLR_EL2", "SCTLR_EL12" },
- { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2),
- "CPACR", "CPTR_EL2", "CPACR_EL12" },
- { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0),
- "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" },
- { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1),
- "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" },
- { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2),
- "TCR_EL1", "TCR_EL2", "TCR_EL12" },
- { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0),
- "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" },
- { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1),
- "ELR_EL1", "ELR_EL2", "ELR_EL12" },
- { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0),
- "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" },
- { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1),
- "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" },
- { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0),
- "ESR_EL1", "ESR_EL2", "ESR_EL12" },
- { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0),
- "FAR_EL1", "FAR_EL2", "FAR_EL12" },
- { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0),
- "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" },
- { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0),
- "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" },
- { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 0), K(3, 5, 12, 0, 0),
- "VBAR", "VBAR_EL2", "VBAR_EL12" },
- { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1),
- "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" },
- { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0),
- "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" },
-
- /*
- * Note that redirection of ZCR is mentioned in the description
- * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but
- * not in the summary table.
- */
- { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0),
- "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve },
- { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6),
- "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme },
-
- { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0),
- "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte },
-
- { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7),
- "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12",
- isar_feature_aa64_scxtnum },
-
- /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */
- /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */
- };
-#undef K
-
- size_t i;
-
- for (i = 0; i < ARRAY_SIZE(aliases); i++) {
- const struct E2HAlias *a = &aliases[i];
- ARMCPRegInfo *src_reg, *dst_reg, *new_reg;
- bool ok;
-
- if (a->feature && !a->feature(&cpu->isar)) {
- continue;
- }
-
- src_reg = g_hash_table_lookup(cpu->cp_regs,
- (gpointer)(uintptr_t)a->src_key);
- dst_reg = g_hash_table_lookup(cpu->cp_regs,
- (gpointer)(uintptr_t)a->dst_key);
- g_assert(src_reg != NULL);
- g_assert(dst_reg != NULL);
-
- /* Cross-compare names to detect typos in the keys. */
- g_assert(strcmp(src_reg->name, a->src_name) == 0);
- g_assert(strcmp(dst_reg->name, a->dst_name) == 0);
-
- /* None of the core system registers use opaque; we will. */
- g_assert(src_reg->opaque == NULL);
-
- /* Create alias before redirection so we dup the right data. */
- new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo));
-
- new_reg->name = a->new_name;
- new_reg->type |= ARM_CP_ALIAS;
- /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
- new_reg->access &= PL2_RW | PL3_RW;
- /* The new_reg op fields are as per new_key, not the target reg */
- new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK)
- >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
- new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK)
- >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
- new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK)
- >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
- new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK)
- >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
- new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK)
- >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
- new_reg->opaque = src_reg;
- new_reg->orig_readfn = src_reg->readfn ?: raw_read;
- new_reg->orig_writefn = src_reg->writefn ?: raw_write;
- new_reg->orig_accessfn = src_reg->accessfn;
- if (!new_reg->raw_readfn) {
- new_reg->raw_readfn = raw_read;
- }
- if (!new_reg->raw_writefn) {
- new_reg->raw_writefn = raw_write;
- }
- new_reg->readfn = el2_e2h_e12_read;
- new_reg->writefn = el2_e2h_e12_write;
- new_reg->accessfn = el2_e2h_e12_access;
-
- /*
- * If the _EL1 register is redirected to memory by FEAT_NV2,
- * then it shares the offset with the _EL12 register,
- * and which one is redirected depends on HCR_EL2.NV1.
- */
- if (new_reg->nv2_redirect_offset) {
- assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1);
- new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1;
- new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
- }
-
- ok = g_hash_table_insert(cpu->cp_regs,
- (gpointer)(uintptr_t)a->new_key, new_reg);
- g_assert(ok);
-
- src_reg->opaque = dst_reg;
- src_reg->orig_readfn = src_reg->readfn ?: raw_read;
- src_reg->orig_writefn = src_reg->writefn ?: raw_write;
- if (!src_reg->raw_readfn) {
- src_reg->raw_readfn = raw_read;
- }
- if (!src_reg->raw_writefn) {
- src_reg->raw_writefn = raw_write;
- }
- src_reg->readfn = el2_e2h_read;
- src_reg->writefn = el2_e2h_write;
- }
-}
#endif
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -6569,6 +4829,8 @@ static const ARMCPRegInfo zcr_reginfo[] = {
{ .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0,
.nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 0),
.access = PL1_RW, .type = ARM_CP_SVE,
.fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]),
.writefn = zcr_write, .raw_writefn = raw_write },
@@ -6584,7 +4846,6 @@ static const ARMCPRegInfo zcr_reginfo[] = {
.writefn = zcr_write, .raw_writefn = raw_write },
};
-#ifdef TARGET_AARCH64
static CPAccessResult access_tpidr2(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -6659,7 +4920,7 @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask)
* when disabled either.
*/
if (change & new & R_SVCR_ZA_MASK) {
- memset(env->zarray, 0, sizeof(env->zarray));
+ memset(&env->za_state, 0, sizeof(env->za_state));
}
if (tcg_enabled()) {
@@ -6678,10 +4939,14 @@ static void smcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
{
int cur_el = arm_current_el(env);
int old_len = sve_vqm1_for_el(env, cur_el);
+ uint64_t valid_mask = R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
int new_len;
QEMU_BUILD_BUG_ON(ARM_MAX_VQ > R_SMCR_LEN_MASK + 1);
- value &= R_SMCR_LEN_MASK | R_SMCR_FA64_MASK;
+ if (cpu_isar_feature(aa64_sme2, env_archcpu(env))) {
+ valid_mask |= R_SMCR_EZT0_MASK;
+ }
+ value &= valid_mask;
raw_write(env, ri, value);
/*
@@ -6711,6 +4976,8 @@ static const ARMCPRegInfo sme_reginfo[] = {
{ .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6,
.nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 6),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 6),
.access = PL1_RW, .type = ARM_CP_SME,
.fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]),
.writefn = smcr_write, .raw_writefn = raw_write },
@@ -6757,6 +5024,11 @@ static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri,
R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK |
R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK;
+ if (cpu_isar_feature(aa64_rme_gpc2, env_archcpu(env))) {
+ rw_mask |= R_GPCCR_APPSAA_MASK | R_GPCCR_NSO_MASK |
+ R_GPCCR_SPAD_MASK | R_GPCCR_NSPAD_MASK | R_GPCCR_RLPAD_MASK;
+ }
+
env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask);
}
@@ -6818,107 +5090,97 @@ static const ARMCPRegInfo nmi_reginfo[] = {
.writefn = aa64_allint_write, .readfn = aa64_allint_read,
.resetfn = arm_cp_reset_ignore },
};
-#endif /* TARGET_AARCH64 */
-static void define_pmu_regs(ARMCPU *cpu)
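+/*
+ * FEAT_MEC: MECID register accesses from EL2 are UNDEFINED outside the
+ * Realm security space, and trap to EL3 while SCR_EL3.MECEn is clear.
+ */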
+static CPAccessResult mecid_access(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
{
- /*
- * v7 performance monitor control register: same implementor
- * field as main ID register, and we implement four counters in
- * addition to the cycle count register.
- */
- unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
- ARMCPRegInfo pmcr = {
- .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
- .access = PL0_RW,
- .fgt = FGT_PMCR_EL0,
- .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
- .accessfn = pmreg_access,
- .readfn = pmcr_read, .raw_readfn = raw_read,
- .writefn = pmcr_write, .raw_writefn = raw_write,
- };
- ARMCPRegInfo pmcr64 = {
- .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0,
- .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMCR_EL0,
- .type = ARM_CP_IO,
- .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
- .resetvalue = cpu->isar.reset_pmcr_el0,
- .readfn = pmcr_read, .raw_readfn = raw_read,
- .writefn = pmcr_write, .raw_writefn = raw_write,
- };
+ int el = arm_current_el(env);
- define_one_arm_cp_reg(cpu, &pmcr);
- define_one_arm_cp_reg(cpu, &pmcr64);
- for (i = 0; i < pmcrn; i++) {
- char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i);
- char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i);
- char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i);
- char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i);
- ARMCPRegInfo pmev_regs[] = {
- { .name = pmevcntr_name, .cp = 15, .crn = 14,
- .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
- .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fgt = FGT_PMEVCNTRN_EL0,
- .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
- .accessfn = pmreg_access_xevcntr },
- { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)),
- .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access_xevcntr,
- .type = ARM_CP_IO,
- .fgt = FGT_PMEVCNTRN_EL0,
- .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn,
- .raw_readfn = pmevcntr_rawread,
- .raw_writefn = pmevcntr_rawwrite },
- { .name = pmevtyper_name, .cp = 15, .crn = 14,
- .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7,
- .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS,
- .fgt = FGT_PMEVTYPERN_EL0,
- .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
- .accessfn = pmreg_access },
- { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)),
- .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access,
- .fgt = FGT_PMEVTYPERN_EL0,
- .type = ARM_CP_IO,
- .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn,
- .raw_writefn = pmevtyper_rawwrite },
- };
- define_arm_cp_regs(cpu, pmev_regs);
- g_free(pmevcntr_name);
- g_free(pmevcntr_el0_name);
- g_free(pmevtyper_name);
- g_free(pmevtyper_el0_name);
- }
- if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
- ARMCPRegInfo v81_pmu_regs[] = {
- { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid0, 32, 32) },
- { .name = "PMCEID3", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid1, 32, 32) },
- };
- define_arm_cp_regs(cpu, v81_pmu_regs);
- }
- if (cpu_isar_feature(any_pmuv3p4, cpu)) {
- static const ARMCPRegInfo v84_pmmir = {
- .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
- .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMMIR_EL1,
- .resetvalue = 0
- };
- define_one_arm_cp_reg(cpu, &v84_pmmir);
+ if (el == 2) {
+ if (arm_security_space(env) != ARMSS_Realm) {
+ return CP_ACCESS_UNDEFINED;
+ }
+
+ if (!(env->cp15.scr_el3 & SCR_MECEN)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ }
+
+ return CP_ACCESS_OK;
+}
+
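+/* Only the low MECID_WIDTH bits of a MECID value are implemented. */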
+static void mecid_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ value = extract64(value, 0, MECID_WIDTH);
+ raw_write(env, ri, value);
+}
+
+static CPAccessResult cipae_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ switch (arm_security_space(env)) {
+ case ARMSS_Root: /* EL3 */
+ case ARMSS_Realm: /* Realm EL2 */
+ return CP_ACCESS_OK;
+ default:
+ return CP_ACCESS_UNDEFINED;
}
}
+static const ARMCPRegInfo mec_reginfo[] = {
+ { .name = "MECIDR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 7, .crn = 10, .crm = 8,
+ .access = PL2_R, .type = ARM_CP_CONST | ARM_CP_NV_NO_TRAP,
+ .resetvalue = MECID_WIDTH - 1 },
+ { .name = "MECID_P0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 8,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_p0_el2) },
+ { .name = "MECID_A0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 8,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_a0_el2) },
+ { .name = "MECID_P1_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 8,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_p1_el2) },
+ { .name = "MECID_A1_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 8,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_a1_el2) },
+ { .name = "MECID_RL_A_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .opc2 = 1, .crn = 10, .crm = 10,
+ .access = PL3_RW, .accessfn = mecid_access,
+ .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.mecid_rl_a_el3) },
+ { .name = "VMECID_P_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 9,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmecid_p_el2) },
+ { .name = "VMECID_A_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 9,
+ .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP,
+ .accessfn = mecid_access, .writefn = mecid_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.vmecid_a_el2) },
+ { .name = "DC_CIPAE", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 0,
+ .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
+ .accessfn = cipae_access },
+};
+
+static const ARMCPRegInfo mec_mte_reginfo[] = {
+ { .name = "DC_CIGDPAE", .state = ARM_CP_STATE_AA64,
+ .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 7,
+ .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP,
+ .accessfn = cipae_access },
+};
+
#ifndef CONFIG_USER_ONLY
/*
* We don't know until after realize whether there's a GICv3
@@ -6929,10 +5191,10 @@ static void define_pmu_regs(ARMCPU *cpu)
static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
- uint64_t pfr1 = cpu->isar.id_pfr1;
+ uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1);
if (env->gicv3state) {
- pfr1 |= 1 << 28;
+ pfr1 = FIELD_DP64(pfr1, ID_PFR1, GIC, 1);
}
return pfr1;
}
@@ -6940,10 +5202,10 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
ARMCPU *cpu = env_archcpu(env);
- uint64_t pfr0 = cpu->isar.id_aa64pfr0;
+ uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0);
if (env->gicv3state) {
- pfr0 |= 1 << 24;
+ pfr0 = FIELD_DP64(pfr0, ID_AA64PFR0, GIC, 1);
}
return pfr0;
}
@@ -7010,7 +5272,6 @@ static const ARMCPRegInfo lor_reginfo[] = {
.type = ARM_CP_CONST, .resetvalue = 0 },
};
-#ifdef TARGET_AARCH64
static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -7120,7 +5381,7 @@ static const ARMCPRegInfo rndr_reginfo[] = {
.access = PL0_R, .readfn = rndr_readfn },
};
-static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
+static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
#ifdef CONFIG_TCG
@@ -7257,6 +5518,8 @@ static const ARMCPRegInfo mte_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tfsr_el1,
.nv2_redirect_offset = 0x190 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 6, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 6, 0),
.fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) },
{ .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_NV2_REDIRECT,
@@ -7432,6 +5695,8 @@ static const ARMCPRegInfo scxtnum_reginfo[] = {
.access = PL1_RW, .accessfn = access_scxtnum_el1,
.fgt = FGT_SCXTNUM_EL1,
.nv2_redirect_offset = 0x188 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 7),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 7),
.fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) },
{ .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7,
@@ -7503,8 +5768,6 @@ static const ARMCPRegInfo nv2_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.vncr_el2) },
};
-#endif /* TARGET_AARCH64 */
-
static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
@@ -7700,32 +5963,6 @@ static const ARMCPRegInfo vhe_reginfo[] = {
#endif
};
-#ifndef CONFIG_USER_ONLY
-static const ARMCPRegInfo ats1e1_reginfo[] = {
- { .name = "AT_S1E1RP", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1RP,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
- { .name = "AT_S1E1WP", .state = ARM_CP_STATE_AA64,
- .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .fgt = FGT_ATS1E1WP,
- .accessfn = at_s1e01_access, .writefn = ats_write64 },
-};
-
-static const ARMCPRegInfo ats1cp_reginfo[] = {
- { .name = "ATS1CPRP",
- .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write },
- { .name = "ATS1CPWP",
- .cp = 15, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
- .writefn = ats_write },
-};
-#endif
-
/*
* ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and
* ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field
@@ -7746,10 +5983,218 @@ static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = {
.resetvalue = 0 },
};
+static CPAccessResult sctlr2_el2_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) < 3
+ && arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_SCTLR2EN)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult sctlr2_el1_access(CPUARMState *env,
+ const ARMCPRegInfo *ri,
+ bool isread)
+{
+ CPAccessResult ret = access_tvm_trvm(env, ri, isread);
+ if (ret != CP_ACCESS_OK) {
+ return ret;
+ }
+ if (arm_current_el(env) < 2 && !(arm_hcrx_el2_eff(env) & HCRX_SCTLR2EN)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return sctlr2_el2_access(env, ri, isread);
+}
+
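+/* No SCTLR2_EL1 bits are implemented yet, so writes store zero. */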
+static void sctlr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t valid_mask = 0;
+
+ value &= valid_mask;
+ raw_write(env, ri, value);
+}
+
+static void sctlr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t valid_mask = 0;
+
+ if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
+ valid_mask |= SCTLR2_EMEC;
+ }
+ value &= valid_mask;
+ raw_write(env, ri, value);
+}
+
+static void sctlr2_el3_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t valid_mask = 0;
+
+ if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
+ valid_mask |= SCTLR2_EMEC;
+ }
+ value &= valid_mask;
+ raw_write(env, ri, value);
+}
+
+static const ARMCPRegInfo sctlr2_reginfo[] = {
+ { .name = "SCTLR2_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 1, .crm = 0,
+ .access = PL1_RW, .accessfn = sctlr2_el1_access,
+ .writefn = sctlr2_el1_write, .fgt = FGT_SCTLR_EL1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 3),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 3),
+ .nv2_redirect_offset = 0x278 | NV2_REDIR_NV1,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[1]) },
+ { .name = "SCTLR2_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 1, .crm = 0,
+ .access = PL2_RW, .accessfn = sctlr2_el2_access,
+ .writefn = sctlr2_el2_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[2]) },
+ { .name = "SCTLR2_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 1, .crm = 0,
+ .access = PL3_RW, .writefn = sctlr2_el3_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[3]) },
+};
+
+static CPAccessResult tcr2_el2_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) < 3
+ && arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_TCR2EN)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
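+/* EL1 accesses also honour TVM/TRVM traps and the HCRX_EL2.TCR2En enable. */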
+static CPAccessResult tcr2_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ CPAccessResult ret = access_tvm_trvm(env, ri, isread);
+ if (ret != CP_ACCESS_OK) {
+ return ret;
+ }
+ if (arm_current_el(env) < 2 && !(arm_hcrx_el2_eff(env) & HCRX_TCR2EN)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ return tcr2_el2_access(env, ri, isread);
+}
+
+static void tcr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t valid_mask = 0;
+
+ if (cpu_isar_feature(aa64_s1pie, cpu)) {
+ valid_mask |= TCR2_PIE;
+ }
+ value &= valid_mask;
+ raw_write(env, ri, value);
+}
+
+static void tcr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = env_archcpu(env);
+ uint64_t valid_mask = 0;
+
+ if (cpu_isar_feature(aa64_s1pie, cpu)) {
+ valid_mask |= TCR2_PIE;
+ }
+ if (cpu_isar_feature(aa64_mec, env_archcpu(env))) {
+ valid_mask |= TCR2_AMEC0 | TCR2_AMEC1;
+ }
+ value &= valid_mask;
+ raw_write(env, ri, value);
+}
+
+static const ARMCPRegInfo tcr2_reginfo[] = {
+ { .name = "TCR2_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 2, .crm = 0,
+ .access = PL1_RW, .accessfn = tcr2_el1_access,
+ .writefn = tcr2_el1_write, .fgt = FGT_TCR_EL1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 3),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 3),
+ .nv2_redirect_offset = 0x270 | NV2_REDIR_NV1,
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[1]) },
+ { .name = "TCR2_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 2, .crm = 0,
+ .access = PL2_RW, .accessfn = tcr2_el2_access,
+ .writefn = tcr2_el2_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[2]) },
+};
+
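+/* FEAT_S1PIE/S2PIE: accesses below EL3 trap to EL3 while SCR_EL3.PIEn is clear. */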
+static CPAccessResult pien_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_PIEN)
+ && arm_current_el(env) < 3) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult pien_el1_access(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ CPAccessResult ret = access_tvm_trvm(env, ri, isread);
+ if (ret == CP_ACCESS_OK) {
+ ret = pien_access(env, ri, isread);
+ }
+ return ret;
+}
+
+static const ARMCPRegInfo s1pie_reginfo[] = {
+ { .name = "PIR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 10, .crm = 2,
+ .access = PL1_RW, .accessfn = pien_el1_access,
+ .fgt = FGT_NPIR_EL1, .nv2_redirect_offset = 0x2a0 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 3),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 3),
+ .fieldoffset = offsetof(CPUARMState, cp15.pir_el[1]) },
+ { .name = "PIR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 2,
+ .access = PL2_RW, .accessfn = pien_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.pir_el[2]) },
+ { .name = "PIR_EL3", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 10, .crm = 2,
+ .access = PL3_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.pir_el[3]) },
+ { .name = "PIRE0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 10, .crm = 2,
+ .access = PL1_RW, .accessfn = pien_el1_access,
+ .fgt = FGT_NPIRE0_EL1, .nv2_redirect_offset = 0x290 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 2),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 2),
+ .fieldoffset = offsetof(CPUARMState, cp15.pir_el[0]) },
+ { .name = "PIRE0_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 2,
+ .access = PL2_RW, .accessfn = pien_access,
+ .fieldoffset = offsetof(CPUARMState, cp15.pire0_el2) },
+};
+
+static const ARMCPRegInfo s2pie_reginfo[] = {
+ { .name = "S2PIR_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .opc2 = 5, .crn = 10, .crm = 2,
+ .access = PL2_RW, .accessfn = pien_access,
+ .nv2_redirect_offset = 0x2b0,
+ .fieldoffset = offsetof(CPUARMState, cp15.s2pir_el2) },
+};
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
CPUARMState *env = &cpu->env;
+ ARMISARegisters *isar = &cpu->isar;
+
if (arm_feature(env, ARM_FEATURE_M)) {
/* M profile has no coprocessor registers */
return;
@@ -7764,7 +6209,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, not_v8_cp_reginfo);
}
- define_tlb_insn_regs(cpu);
+#ifndef CONFIG_USER_ONLY
+ if (tcg_enabled()) {
+ define_tlb_insn_regs(cpu);
+ define_at_insn_regs(cpu);
+ }
+#endif
if (arm_feature(env, ARM_FEATURE_V6)) {
/* The ID registers all have impdef reset values */
@@ -7773,7 +6223,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_pfr0 },
+ .resetvalue = GET_IDREG(isar, ID_PFR0)},
/*
* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
* the value of the GIC field until after we define these regs.
@@ -7784,7 +6234,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.accessfn = access_aa32_tid3,
#ifdef CONFIG_USER_ONLY
.type = ARM_CP_CONST,
- .resetvalue = cpu->isar.id_pfr1,
+ .resetvalue = GET_IDREG(isar, ID_PFR1),
#else
.type = ARM_CP_NO_RAW,
.accessfn = access_aa32_tid3,
@@ -7796,72 +6246,72 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_dfr0 },
+ .resetvalue = GET_IDREG(isar, ID_DFR0)},
{ .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->id_afr0 },
+ .resetvalue = GET_IDREG(isar, ID_AFR0)},
{ .name = "ID_MMFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr0 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR0)},
{ .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr1 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR1)},
{ .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr2 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR2)},
{ .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr3 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR3)},
{ .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar0 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR0)},
{ .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar1 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR1)},
{ .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar2 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR2)},
{ .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar3 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR3) },
{ .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar4 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR4) },
{ .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar5 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR5) },
{ .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_mmfr4 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR4)},
{ .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa32_tid3,
- .resetvalue = cpu->isar.id_isar6 },
+ .resetvalue = GET_IDREG(isar, ID_ISAR6) },
};
define_arm_cp_regs(cpu, v6_idregs);
define_arm_cp_regs(cpu, v6_cp_reginfo);
@@ -7871,9 +6321,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_V6K)) {
define_arm_cp_regs(cpu, v6k_cp_reginfo);
}
- if (arm_feature(env, ARM_FEATURE_V7VE)) {
- define_arm_cp_regs(cpu, pmovsset_cp_reginfo);
- }
if (arm_feature(env, ARM_FEATURE_V7)) {
ARMCPRegInfo clidr = {
.name = "CLIDR", .state = ARM_CP_STATE_BOTH,
@@ -7881,12 +6328,11 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_tid4,
.fgt = FGT_CLIDR_EL1,
- .resetvalue = cpu->clidr
+ .resetvalue = GET_IDREG(isar, CLIDR)
};
define_one_arm_cp_reg(cpu, &clidr);
define_arm_cp_regs(cpu, v7_cp_reginfo);
define_debug_regs(cpu);
- define_pmu_regs(cpu);
} else {
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
}
@@ -7912,7 +6358,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.access = PL1_R,
#ifdef CONFIG_USER_ONLY
.type = ARM_CP_CONST,
- .resetvalue = cpu->isar.id_aa64pfr0
+ .resetvalue = GET_IDREG(isar, ID_AA64PFR0)
#else
.type = ARM_CP_NO_RAW,
.accessfn = access_aa64_tid3,
@@ -7924,12 +6370,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64pfr1},
- { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+ .resetvalue = GET_IDREG(isar, ID_AA64PFR1)},
+ { .name = "ID_AA64PFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = 0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64PFR2)},
{ .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7939,12 +6385,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64zfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ZFR0)},
{ .name = "ID_AA64SMFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64smfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64SMFR0)},
{ .name = "ID_AA64PFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7959,12 +6405,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64dfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64DFR0) },
{ .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64dfr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64DFR1) },
{ .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7979,12 +6425,12 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->id_aa64afr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64AFR0) },
{ .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->id_aa64afr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64AFR1) },
{ .name = "ID_AA64AFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -7999,17 +6445,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR0)},
{ .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR1)},
{ .name = "ID_AA64ISAR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64isar2 },
+ .resetvalue = GET_IDREG(isar, ID_AA64ISAR2)},
{ .name = "ID_AA64ISAR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8039,22 +6485,22 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr0 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR0)},
{ .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr1 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR1) },
{ .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr2 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR2) },
{ .name = "ID_AA64MMFR3_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_aa64mmfr3 },
+ .resetvalue = GET_IDREG(isar, ID_AA64MMFR3) },
{ .name = "ID_AA64MMFR4_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
@@ -8126,42 +6572,22 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_pfr2 },
+ .resetvalue = GET_IDREG(isar, ID_PFR2)},
{ .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_dfr1 },
+ .resetvalue = GET_IDREG(isar, ID_DFR1)},
{ .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
- .resetvalue = cpu->isar.id_mmfr5 },
+ .resetvalue = GET_IDREG(isar, ID_MMFR5)},
{ .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST,
.accessfn = access_aa64_tid3,
.resetvalue = 0 },
- { .name = "PMCEID0", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 6,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid0, 0, 32) },
- { .name = "PMCEID0_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 6,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = cpu->pmceid0 },
- { .name = "PMCEID1", .state = ARM_CP_STATE_AA32,
- .cp = 15, .opc1 = 0, .crn = 9, .crm = 12, .opc2 = 7,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = extract64(cpu->pmceid1, 0, 32) },
- { .name = "PMCEID1_EL0", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 7,
- .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST,
- .fgt = FGT_PMCEIDN_EL0,
- .resetvalue = cpu->pmceid1 },
};
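The ID-register churn above is mechanical: direct cpu->isar.id_* field reads become GET_IDREG() lookups. A minimal sketch of that accessor pattern, assuming the ID registers have been collected into an index-based array (the enum and type names below are illustrative, not the tree's):

    #include <stdint.h>

    /* Hypothetical index enum; the real register set is much larger. */
    enum IdRegIdx { ID_AA64PFR0_EL1_IDX, ID_AA64PFR1_EL1_IDX, NUM_IDREGS };

    typedef struct { uint64_t idregs[NUM_IDREGS]; } IdRegs;

    /* Token-paste the register name into its array index. */
    #define GET_IDREG(isar, name) ((isar)->idregs[name ## _EL1_IDX])

    /* usage: uint64_t pfr0 = GET_IDREG(&idr, ID_AA64PFR0); */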
#ifdef CONFIG_USER_ONLY
static const ARMCPRegUserSpaceInfo v8_user_idregs[] = {
@@ -8177,6 +6603,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
R_ID_AA64PFR1_SSBS_MASK |
R_ID_AA64PFR1_MTE_MASK |
R_ID_AA64PFR1_SME_MASK },
+ { .name = "ID_AA64PFR2_EL1",
+ .exported_bits = 0 },
{ .name = "ID_AA64PFR*_EL1_RESERVED",
.is_glob = true },
{ .name = "ID_AA64ZFR0_EL1",
@@ -8496,12 +6924,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.par_s),
offsetoflow32(CPUARMState, cp15.par_ns) },
.writefn = par_write},
-#ifndef CONFIG_USER_ONLY
- /* This underdecoding is safe because the reginfo is NO_RAW. */
- { .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
- .access = PL1_W, .accessfn = ats_access,
- .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
-#endif
};
/*
@@ -8528,9 +6950,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_STRONGARM)) {
define_arm_cp_regs(cpu, strongarm_cp_reginfo);
}
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- define_arm_cp_regs(cpu, xscale_cp_reginfo);
- }
if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) {
define_arm_cp_regs(cpu, dummy_c15_cp_reginfo);
}
@@ -8853,12 +7272,14 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_VBAR)) {
static const ARMCPRegInfo vbar_cp_reginfo[] = {
- { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
+ { .name = "VBAR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .writefn = vbar_write,
.accessfn = access_nv1,
.fgt = FGT_VBAR_EL1,
.nv2_redirect_offset = 0x250 | NV2_REDIR_NV1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 12, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 12, 0, 0),
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s),
offsetof(CPUARMState, cp15.vbar_ns) },
.resetvalue = 0 },
@@ -8869,24 +7290,18 @@ void register_cp_regs_for_features(ARMCPU *cpu)
/* Generic registers whose values depend on the implementation */
{
ARMCPRegInfo sctlr = {
- .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
+ .name = "SCTLR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tvm_trvm,
.fgt = FGT_SCTLR_EL1,
+ .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 0),
+ .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 0),
.nv2_redirect_offset = 0x110 | NV2_REDIR_NV1,
.bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s),
offsetof(CPUARMState, cp15.sctlr_ns) },
.writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
.raw_writefn = raw_write,
};
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- /*
- * Normally we would always end the TB on an SCTLR write, but Linux
- * arch/arm/mach-pxa/sleep.S expects two instructions following
- * an MMU enable to execute from cache. Imitate this behaviour.
- */
- sctlr.type |= ARM_CP_SUPPRESS_TB_END;
- }
define_one_arm_cp_reg(cpu, &sctlr);
if (arm_feature(env, ARM_FEATURE_PMSA) &&
@@ -8907,14 +7322,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_pan, cpu)) {
define_one_arm_cp_reg(cpu, &pan_reginfo);
}
-#ifndef CONFIG_USER_ONLY
- if (cpu_isar_feature(aa64_ats1e1, cpu)) {
- define_arm_cp_regs(cpu, ats1e1_reginfo);
- }
- if (cpu_isar_feature(aa32_ats1e1, cpu)) {
- define_arm_cp_regs(cpu, ats1cp_reginfo);
- }
-#endif
if (cpu_isar_feature(aa64_uao, cpu)) {
define_one_arm_cp_reg(cpu, &uao_reginfo);
}
@@ -8945,7 +7352,6 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
}
-#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_sme, cpu)) {
define_arm_cp_regs(cpu, sme_reginfo);
}
@@ -9006,7 +7412,27 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (cpu_isar_feature(aa64_nmi, cpu)) {
define_arm_cp_regs(cpu, nmi_reginfo);
}
-#endif
+
+ if (cpu_isar_feature(aa64_sctlr2, cpu)) {
+ define_arm_cp_regs(cpu, sctlr2_reginfo);
+ }
+
+ if (cpu_isar_feature(aa64_tcr2, cpu)) {
+ define_arm_cp_regs(cpu, tcr2_reginfo);
+ }
+
+ if (cpu_isar_feature(aa64_s1pie, cpu)) {
+ define_arm_cp_regs(cpu, s1pie_reginfo);
+ }
+ if (cpu_isar_feature(aa64_s2pie, cpu)) {
+ define_arm_cp_regs(cpu, s2pie_reginfo);
+ }
+ if (cpu_isar_feature(aa64_mec, cpu)) {
+ define_arm_cp_regs(cpu, mec_reginfo);
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ define_arm_cp_regs(cpu, mec_mte_reginfo);
+ }
+ }
if (cpu_isar_feature(any_predinv, cpu)) {
define_arm_cp_regs(cpu, predinv_reginfo);
@@ -9016,60 +7442,42 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, ccsidr2_reginfo);
}
-#ifndef CONFIG_USER_ONLY
- /*
- * Register redirections and aliases must be done last,
- * after the registers from the other extensions have been defined.
- */
- if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) {
- define_arm_vh_e2h_redirects_aliases(cpu);
+ define_pm_cpregs(cpu);
+ define_gcs_cpregs(cpu);
+}
+
+/*
+ * Copy an ARMCPRegInfo structure into a single allocation that also
+ * holds the name, with an optional suffix appended to the name.
+ */
+static ARMCPRegInfo *alloc_cpreg(const ARMCPRegInfo *in, const char *suffix)
+{
+ const char *name = in->name;
+ size_t name_len = strlen(name);
+ size_t suff_len = suffix ? strlen(suffix) : 0;
+ ARMCPRegInfo *out = g_malloc(sizeof(*in) + name_len + suff_len + 1);
+ char *p = (char *)(out + 1);
+
+ *out = *in;
+ out->name = p;
+
+ memcpy(p, name, name_len + 1);
+ if (suffix) {
+ memcpy(p + name_len, suffix, suff_len + 1);
}
-#endif
+ return out;
}
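alloc_cpreg() makes one allocation cover the struct, the copied name, and the optional suffix, so a single g_free() releases everything. A standalone sketch of the same pattern in plain C (the struct here is a stand-in, not ARMCPRegInfo):

    #include <stdlib.h>
    #include <string.h>

    struct reg { const char *name; int flags; };

    static struct reg *reg_clone(const struct reg *in, const char *suffix)
    {
        size_t name_len = strlen(in->name);
        size_t suff_len = suffix ? strlen(suffix) : 0;
        /* one block: the struct, then the NUL-terminated name */
        struct reg *out = malloc(sizeof(*out) + name_len + suff_len + 1);
        char *p = (char *)(out + 1);

        *out = *in;
        out->name = p;
        memcpy(p, in->name, name_len + 1);
        if (suffix) {
            /* overwrite the NUL just copied, append suffix + its NUL */
            memcpy(p + name_len, suffix, suff_len + 1);
        }
        return out;   /* free(out) releases struct and name together */
    }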
/*
- * Private utility function for define_one_arm_cp_reg_with_opaque():
+ * Private utility function for define_one_arm_cp_reg():
* add a single reginfo struct to the hash table.
*/
-static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
- void *opaque, CPState state,
- CPSecureState secstate,
- int crm, int opc1, int opc2,
- const char *name)
+static void add_cpreg_to_hashtable(ARMCPU *cpu, ARMCPRegInfo *r,
+ CPState state, CPSecureState secstate,
+ uint32_t key)
{
CPUARMState *env = &cpu->env;
- uint32_t key;
- ARMCPRegInfo *r2;
- bool is64 = r->type & ARM_CP_64BIT;
bool ns = secstate & ARM_CP_SECSTATE_NS;
- int cp = r->cp;
- size_t name_len;
- bool make_const;
-
- switch (state) {
- case ARM_CP_STATE_AA32:
- /* We assume it is a cp15 register if the .cp field is left unset. */
- if (cp == 0 && r->state == ARM_CP_STATE_BOTH) {
- cp = 15;
- }
- key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2);
- break;
- case ARM_CP_STATE_AA64:
- /*
- * To allow abbreviation of ARMCPRegInfo definitions, we treat
- * cp == 0 as equivalent to the value for "standard guest-visible
- * sysreg". STATE_BOTH definitions are also always "standard sysreg"
- * in their AArch64 view (the .cp value may be non-zero for the
- * benefit of the AArch32 view).
- */
- if (cp == 0 || r->state == ARM_CP_STATE_BOTH) {
- cp = CP_REG_ARM64_SYSREG_CP;
- }
- key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2);
- break;
- default:
- g_assert_not_reached();
- }
/* Overriding of an existing definition must be explicitly requested. */
if (!(r->type & ARM_CP_OVERRIDE)) {
@@ -9079,84 +7487,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
}
}
- /*
- * Eliminate registers that are not present because the EL is missing.
- * Doing this here makes it easier to put all registers for a given
- * feature into the same ARMCPRegInfo array and define them all at once.
- */
- make_const = false;
- if (arm_feature(env, ARM_FEATURE_EL3)) {
- /*
- * An EL2 register without EL2 but with EL3 is (usually) RES0.
- * See rule RJFFP in section D1.1.3 of DDI0487H.a.
- */
- int min_el = ctz32(r->access) / 2;
- if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
- if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
- return;
- }
- make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP);
- }
- } else {
- CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
- ? PL2_RW : PL1_RW);
- if ((r->access & max_el) == 0) {
- return;
- }
- }
-
- /* Combine cpreg and name into one allocation. */
- name_len = strlen(name) + 1;
- r2 = g_malloc(sizeof(*r2) + name_len);
- *r2 = *r;
- r2->name = memcpy(r2 + 1, name, name_len);
-
- /*
- * Update fields to match the instantiation, overwiting wildcards
- * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH.
- */
- r2->cp = cp;
- r2->crm = crm;
- r2->opc1 = opc1;
- r2->opc2 = opc2;
- r2->state = state;
- r2->secure = secstate;
- if (opaque) {
- r2->opaque = opaque;
- }
-
- if (make_const) {
- /* This should not have been a very special register to begin. */
- int old_special = r2->type & ARM_CP_SPECIAL_MASK;
- assert(old_special == 0 || old_special == ARM_CP_NOP);
- /*
- * Set the special function to CONST, retaining the other flags.
- * This is important for e.g. ARM_CP_SVE so that we still
- * take the SVE trap if CPTR_EL3.EZ == 0.
- */
- r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
- /*
- * Usually, these registers become RES0, but there are a few
- * special cases like VPIDR_EL2 which have a constant non-zero
- * value with writes ignored.
- */
- if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
- r2->resetvalue = 0;
- }
- /*
- * ARM_CP_CONST has precedence, so removing the callbacks and
- * offsets are not strictly necessary, but it is potentially
- * less confusing to debug later.
- */
- r2->readfn = NULL;
- r2->writefn = NULL;
- r2->raw_readfn = NULL;
- r2->raw_writefn = NULL;
- r2->resetfn = NULL;
- r2->fieldoffset = 0;
- r2->bank_fieldoffsets[0] = 0;
- r2->bank_fieldoffsets[1] = 0;
- } else {
+ {
bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1];
if (isbanked) {
@@ -9165,7 +7496,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
* Overwriting fieldoffset as the array is only used to define
* banked registers but later only fieldoffset is used.
*/
- r2->fieldoffset = r->bank_fieldoffsets[ns];
+ r->fieldoffset = r->bank_fieldoffsets[ns];
}
if (state == ARM_CP_STATE_AA32) {
if (isbanked) {
@@ -9182,54 +7513,187 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r,
*/
if ((r->state == ARM_CP_STATE_BOTH && ns) ||
(arm_feature(env, ARM_FEATURE_V8) && !ns)) {
- r2->type |= ARM_CP_ALIAS;
+ r->type |= ARM_CP_ALIAS;
}
} else if ((secstate != r->secure) && !ns) {
/*
* The register is not banked so we only want to allow
* migration of the non-secure instance.
*/
- r2->type |= ARM_CP_ALIAS;
- }
-
- if (HOST_BIG_ENDIAN &&
- r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) {
- r2->fieldoffset += sizeof(uint32_t);
+ r->type |= ARM_CP_ALIAS;
}
}
}
/*
- * By convention, for wildcarded registers only the first
- * entry is used for migration; the others are marked as
- * ALIAS so we don't try to transfer the register
- * multiple times. Special registers (ie NOP/WFI) are
- * never migratable and not even raw-accessible.
+ * For 32-bit AArch32 regs shared with 64-bit AArch64 regs,
+ * adjust the field offset for endianness. This had to be
+ * delayed until banked registers were resolved.
*/
- if (r2->type & ARM_CP_SPECIAL_MASK) {
- r2->type |= ARM_CP_NO_RAW;
+ if (HOST_BIG_ENDIAN &&
+ state == ARM_CP_STATE_AA32 &&
+ r->state == ARM_CP_STATE_BOTH &&
+ r->fieldoffset) {
+ r->fieldoffset += sizeof(uint32_t);
}
- if (((r->crm == CP_ANY) && crm != 0) ||
- ((r->opc1 == CP_ANY) && opc1 != 0) ||
- ((r->opc2 == CP_ANY) && opc2 != 0)) {
- r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
+
+ /*
+ * Special registers (i.e. NOP/WFI) are never migratable and
+ * are not even raw-accessible.
+ */
+ if (r->type & ARM_CP_SPECIAL_MASK) {
+ r->type |= ARM_CP_NO_RAW;
}
/*
+ * Update fields to match the instantiation, overwriting wildcards
+ * such as ARM_CP_STATE_BOTH or ARM_CP_SECSTATE_BOTH.
+ */
+ r->state = state;
+ r->secure = secstate;
+
+ /*
* Check that raw accesses are either forbidden or handled. Note that
* we can't assert this earlier because the setup of fieldoffset for
* banked registers has to be done first.
*/
- if (!(r2->type & ARM_CP_NO_RAW)) {
- assert(!raw_accessors_invalid(r2));
+ if (!(r->type & ARM_CP_NO_RAW)) {
+ assert(!raw_accessors_invalid(r));
}
- g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2);
+ g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r);
+}
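The HOST_BIG_ENDIAN adjustment above exists because the AArch32 view of a STATE_BOTH register reads only the low 32 bits of a 64-bit state field, and on a big-endian host those sit 4 bytes into the field. A self-contained demonstration:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
        uint64_t field = 0x1122334455667788ull;
        union { uint32_t u; char c[4]; } probe = { .u = 1 };
        int big_endian = (probe.c[0] == 0);
        size_t off = big_endian ? sizeof(uint32_t) : 0; /* fieldoffset += 4 */
        uint32_t lo;

        memcpy(&lo, (char *)&field + off, sizeof(lo));
        printf("low half = 0x%08" PRIx32 "\n", lo); /* 0x55667788 either way */
        return 0;
    }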
+
+static void add_cpreg_to_hashtable_aa32(ARMCPU *cpu, ARMCPRegInfo *r)
+{
+ /*
+ * Under AArch32, CP registers can be common
+ * (same for secure and non-secure world) or banked.
+ */
+ ARMCPRegInfo *r_s;
+ bool is64 = r->type & ARM_CP_64BIT;
+ uint32_t key = ENCODE_CP_REG(r->cp, is64, 0, r->crn,
+ r->crm, r->opc1, r->opc2);
+
+ assert(!(r->type & ARM_CP_ADD_TLBI_NXS)); /* aa64 only */
+ r->vhe_redir_to_el2 = 0;
+ r->vhe_redir_to_el01 = 0;
+
+ switch (r->secure) {
+ case ARM_CP_SECSTATE_NS:
+ key |= CP_REG_AA32_NS_MASK;
+ /* fall through */
+ case ARM_CP_SECSTATE_S:
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32, r->secure, key);
+ break;
+ case ARM_CP_SECSTATE_BOTH:
+ r_s = alloc_cpreg(r, "_S");
+ add_cpreg_to_hashtable(cpu, r_s, ARM_CP_STATE_AA32,
+ ARM_CP_SECSTATE_S, key);
+
+ key |= CP_REG_AA32_NS_MASK;
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32,
+ ARM_CP_SECSTATE_NS, key);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
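For an ARM_CP_SECSTATE_BOTH register the function above therefore inserts two entries: a "_S" copy keyed without the NS bit, and the original keyed with it. A sketch of just the key handling, with a hypothetical bit position standing in for CP_REG_AA32_NS_MASK:

    #include <stdint.h>

    #define NS_MASK (1u << 29)  /* hypothetical position of the NS key bit */

    static void banked_keys(uint32_t key, uint32_t *key_s, uint32_t *key_ns)
    {
        *key_s  = key;            /* secure bank: name gains a "_S" suffix */
        *key_ns = key | NS_MASK;  /* non-secure bank keeps the base name */
    }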
+static void add_cpreg_to_hashtable_aa64(ARMCPU *cpu, ARMCPRegInfo *r)
+{
+ uint32_t key = ENCODE_AA64_CP_REG(r->opc0, r->opc1,
+ r->crn, r->crm, r->opc2);
+
+ if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
+ cpu_isar_feature(aa64_xs, cpu)) {
+ /*
+ * This is a TLBI insn which has an NXS variant. The
+ * NXS variant is at the same encoding except that
+ * crn is +1, and has the same behaviour except for
+ * fine-grained trapping. Add the NXS insn here and
+ * then continue below to add the normal register.
+ * alloc_cpreg() hands us a private copy of the struct
+ * with "NXS" appended to the name, so the caller's
+ * reginfo is left untouched.
+ */
+ ARMCPRegInfo *nxs_ri = alloc_cpreg(r, "NXS");
+ uint32_t nxs_key;
+
+ assert(nxs_ri->crn < 0xf);
+ nxs_ri->crn++;
+ /* Also increment the CRN field inside the key value */
+ nxs_key = key + (1 << CP_REG_ARM64_SYSREG_CRN_SHIFT);
+ if (nxs_ri->fgt) {
+ nxs_ri->fgt |= R_FGT_NXS_MASK;
+ }
+
+ add_cpreg_to_hashtable(cpu, nxs_ri, ARM_CP_STATE_AA64,
+ ARM_CP_SECSTATE_NS, nxs_key);
+ }
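FEAT_XS places each nXS TLBI variant at the original encoding with CRn incremented by one (TLBI VAE1IS at op0=1 op1=0 CRn=8 CRm=3 op2=1, for example, gains VAE1ISNXS at CRn=9), so the packed key only needs a one-unit bump in its CRn field. A sketch with a hypothetical shift value:

    #include <stdint.h>
    #include <assert.h>

    #define CRN_SHIFT 10                  /* hypothetical field position */
    #define CRN_MASK  (0xfu << CRN_SHIFT)

    static uint32_t nxs_key_sketch(uint32_t key)
    {
        uint32_t crn = (key & CRN_MASK) >> CRN_SHIFT;
        assert(crn < 0xf);                /* the +1 must not carry out of CRn */
        return key + (1u << CRN_SHIFT);   /* same arithmetic as nxs_key above */
    }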
+
+ if (!r->vhe_redir_to_el01) {
+ assert(!r->vhe_redir_to_el2);
+ } else if (!arm_feature(&cpu->env, ARM_FEATURE_EL2) ||
+ !cpu_isar_feature(aa64_vh, cpu)) {
+ r->vhe_redir_to_el2 = 0;
+ r->vhe_redir_to_el01 = 0;
+ } else {
+ /* Create the FOO_EL12 alias. */
+ ARMCPRegInfo *r2 = alloc_cpreg(r, "2");
+ uint32_t key2 = r->vhe_redir_to_el01;
+
+ /*
+ * Clear EL1 redirection on the FOO_EL1 reg;
+ * Clear EL2 redirection on the FOO_EL12 reg;
+ * Install redirection from FOO_EL12 back to FOO_EL1.
+ */
+ r->vhe_redir_to_el01 = 0;
+ r2->vhe_redir_to_el2 = 0;
+ r2->vhe_redir_to_el01 = key;
+ r2->type |= ARM_CP_ALIAS | ARM_CP_NO_RAW;
+ /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */
+ r2->access &= PL2_RW | PL3_RW;
+ /* The r2 encoding fields are taken from key2, not the target reg */
+ r2->crn = (key2 & CP_REG_ARM64_SYSREG_CRN_MASK)
+ >> CP_REG_ARM64_SYSREG_CRN_SHIFT;
+ r2->crm = (key2 & CP_REG_ARM64_SYSREG_CRM_MASK)
+ >> CP_REG_ARM64_SYSREG_CRM_SHIFT;
+ r2->opc0 = (key2 & CP_REG_ARM64_SYSREG_OP0_MASK)
+ >> CP_REG_ARM64_SYSREG_OP0_SHIFT;
+ r2->opc1 = (key2 & CP_REG_ARM64_SYSREG_OP1_MASK)
+ >> CP_REG_ARM64_SYSREG_OP1_SHIFT;
+ r2->opc2 = (key2 & CP_REG_ARM64_SYSREG_OP2_MASK)
+ >> CP_REG_ARM64_SYSREG_OP2_SHIFT;
+
+ /* Non-redirected access to this register will abort. */
+ r2->readfn = NULL;
+ r2->writefn = NULL;
+ r2->raw_readfn = NULL;
+ r2->raw_writefn = NULL;
+ r2->accessfn = NULL;
+ r2->fieldoffset = 0;
+
+ /*
+ * If the _EL1 register is redirected to memory by FEAT_NV2,
+ * then it shares the offset with the _EL12 register,
+ * and which one is redirected depends on HCR_EL2.NV1.
+ */
+ if (r2->nv2_redirect_offset) {
+ assert(r2->nv2_redirect_offset & NV2_REDIR_NV1);
+ r2->nv2_redirect_offset &= ~NV2_REDIR_NV1;
+ r2->nv2_redirect_offset |= NV2_REDIR_NO_NV1;
+ }
+ add_cpreg_to_hashtable(cpu, r2, ARM_CP_STATE_AA64,
+ ARM_CP_SECSTATE_NS, key2);
+ }
+
+ add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA64,
+ ARM_CP_SECSTATE_NS, key);
+}
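Rebuilding the FOO_EL12 alias's encoding fields from key2 is a plain mask-and-shift unpack of whatever layout ENCODE_AA64_CP_REG packs. A round-trip sketch under a hypothetical field layout:

    #include <stdint.h>
    #include <assert.h>

    /* hypothetical layout: op0[15:14] op1[13:11] crn[10:7] crm[6:3] op2[2:0] */
    #define PACK(v, sh, w)    (((uint32_t)(v) & ((1u << (w)) - 1)) << (sh))
    #define UNPACK(k, sh, w)  (((k) >> (sh)) & ((1u << (w)) - 1))

    static uint32_t encode(int op0, int op1, int crn, int crm, int op2)
    {
        return PACK(op0, 14, 2) | PACK(op1, 11, 3) | PACK(crn, 7, 4)
             | PACK(crm, 3, 4)  | PACK(op2, 0, 3);
    }

    int main(void)
    {
        uint32_t key = encode(3, 5, 12, 0, 0);  /* a VBAR_EL12-style encoding */
        assert(UNPACK(key, 11, 3) == 5);        /* opc1 decodes back out */
        return 0;
    }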
+
-void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
- const ARMCPRegInfo *r, void *opaque)
+void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *r)
{
/*
* Define implementations of coprocessor registers.
@@ -9255,21 +7719,27 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
* bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of
* the register, if any.
*/
- int crm, opc1, opc2;
int crmmin = (r->crm == CP_ANY) ? 0 : r->crm;
int crmmax = (r->crm == CP_ANY) ? 15 : r->crm;
int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1;
int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1;
int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2;
int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2;
- CPState state;
+ int cp = r->cp;
+ ARMCPRegInfo r_const;
+ CPUARMState *env = &cpu->env;
- /* 64 bit registers have only CRm and Opc1 fields */
- assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn)));
+ /*
+ * AArch64 regs are all 64 bit, so ARM_CP_64BIT is meaningless.
+ * Moreover, the encoding test just below generally prevents a
+ * shared encoding, so ARM_CP_STATE_BOTH won't work either.
+ */
+ assert(r->state == ARM_CP_STATE_AA32 || !(r->type & ARM_CP_64BIT));
+ /* AArch32 64-bit registers have only CRm and Opc1 fields. */
+ assert(!(r->type & ARM_CP_64BIT) || !(r->opc2 || r->crn));
/* op0 only exists in the AArch64 encodings */
- assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0));
- /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */
- assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT));
+ assert(r->state != ARM_CP_STATE_AA32 || r->opc0 == 0);
+
/*
* This API is only for Arm's system coprocessors (14 and 15) or
* (M-profile or v7A-and-earlier only) for implementation defined
@@ -9280,21 +7750,25 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
*/
switch (r->state) {
case ARM_CP_STATE_BOTH:
- /* 0 has a special meaning, but otherwise the same rules as AA32. */
- if (r->cp == 0) {
+ /*
+ * If the cp field is left unset, assume cp15.
+ * Otherwise apply the same rules as AA32.
+ */
+ if (cp == 0) {
+ cp = 15;
break;
}
/* fall through */
case ARM_CP_STATE_AA32:
if (arm_feature(&cpu->env, ARM_FEATURE_V8) &&
!arm_feature(&cpu->env, ARM_FEATURE_M)) {
- assert(r->cp >= 14 && r->cp <= 15);
+ assert(cp >= 14 && cp <= 15);
} else {
- assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15));
+ assert(cp < 8 || (cp >= 14 && cp <= 15));
}
break;
case ARM_CP_STATE_AA64:
- assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP);
+ assert(cp == 0);
break;
default:
g_assert_not_reached();
@@ -9359,75 +7833,104 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
}
}
- for (crm = crmmin; crm <= crmmax; crm++) {
- for (opc1 = opc1min; opc1 <= opc1max; opc1++) {
- for (opc2 = opc2min; opc2 <= opc2max; opc2++) {
- for (state = ARM_CP_STATE_AA32;
- state <= ARM_CP_STATE_AA64; state++) {
- if (r->state != state && r->state != ARM_CP_STATE_BOTH) {
- continue;
- }
- if ((r->type & ARM_CP_ADD_TLBI_NXS) &&
- cpu_isar_feature(aa64_xs, cpu)) {
- /*
- * This is a TLBI insn which has an NXS variant. The
- * NXS variant is at the same encoding except that
- * crn is +1, and has the same behaviour except for
- * fine-grained trapping. Add the NXS insn here and
- * then fall through to add the normal register.
- * add_cpreg_to_hashtable() copies the cpreg struct
- * and name that it is passed, so it's OK to use
- * a local struct here.
- */
- ARMCPRegInfo nxs_ri = *r;
- g_autofree char *name = g_strdup_printf("%sNXS", r->name);
-
- assert(state == ARM_CP_STATE_AA64);
- assert(nxs_ri.crn < 0xf);
- nxs_ri.crn++;
- if (nxs_ri.fgt) {
- nxs_ri.fgt |= R_FGT_NXS_MASK;
- }
- add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, name);
- }
- if (state == ARM_CP_STATE_AA32) {
- /*
- * Under AArch32 CP registers can be common
- * (same for secure and non-secure world) or banked.
- */
- char *name;
-
- switch (r->secure) {
- case ARM_CP_SECSTATE_S:
- case ARM_CP_SECSTATE_NS:
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- r->secure, crm, opc1, opc2,
- r->name);
- break;
- case ARM_CP_SECSTATE_BOTH:
- name = g_strdup_printf("%s_S", r->name);
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- ARM_CP_SECSTATE_S,
- crm, opc1, opc2, name);
- g_free(name);
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, r->name);
- break;
- default:
- g_assert_not_reached();
- }
- } else {
- /*
- * AArch64 registers get mapped to non-secure instance
- * of AArch32
- */
- add_cpreg_to_hashtable(cpu, r, opaque, state,
- ARM_CP_SECSTATE_NS,
- crm, opc1, opc2, r->name);
- }
+ /*
+ * Eliminate registers that are not present because the EL is missing.
+ * Doing this here makes it easier to put all registers for a given
+ * feature into the same ARMCPRegInfo array and define them all at once.
+ */
+ if (arm_feature(env, ARM_FEATURE_EL3)) {
+ /*
+ * An EL2 register without EL2 but with EL3 is (usually) RES0.
+ * See rule RJFFP in section D1.1.3 of DDI0487H.a.
+ */
+ int min_el = ctz32(r->access) / 2;
+ if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) {
+ if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) {
+ return;
+ }
+ if (!(r->type & ARM_CP_EL3_NO_EL2_KEEP)) {
+ /* This should not have been a very special register to begin with. */
+ int old_special = r->type & ARM_CP_SPECIAL_MASK;
+ assert(old_special == 0 || old_special == ARM_CP_NOP);
+
+ r_const = *r;
+
+ /*
+ * Set the special function to CONST, retaining the other flags.
+ * This is important for e.g. ARM_CP_SVE so that we still
+ * take the SVE trap if CPTR_EL3.EZ == 0.
+ */
+ r_const.type = (r->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST;
+ /*
+ * Usually, these registers become RES0, but there are a few
+ * special cases like VPIDR_EL2 which have a constant non-zero
+ * value with writes ignored.
+ */
+ if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) {
+ r_const.resetvalue = 0;
+ }
+ /*
+ * ARM_CP_CONST has precedence, so removing the callbacks and
+ * offsets is not strictly necessary, but it is potentially
+ * less confusing to debug later.
+ */
+ r_const.readfn = NULL;
+ r_const.writefn = NULL;
+ r_const.raw_readfn = NULL;
+ r_const.raw_writefn = NULL;
+ r_const.resetfn = NULL;
+ r_const.fieldoffset = 0;
+ r_const.bank_fieldoffsets[0] = 0;
+ r_const.bank_fieldoffsets[1] = 0;
+
+ r = &r_const;
+ }
+ }
+ } else {
+ CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2)
+ ? PL2_RW : PL1_RW);
+ if ((r->access & max_el) == 0) {
+ return;
+ }
+ }
+
+ for (int crm = crmmin; crm <= crmmax; crm++) {
+ for (int opc1 = opc1min; opc1 <= opc1max; opc1++) {
+ for (int opc2 = opc2min; opc2 <= opc2max; opc2++) {
+ ARMCPRegInfo *r2 = alloc_cpreg(r, NULL);
+ ARMCPRegInfo *r3;
+
+ /*
+ * By convention, for wildcarded registers only the first
+ * entry is used for migration; the others are marked as
+ * ALIAS so we don't try to transfer the register
+ * multiple times.
+ */
+ if (crm != crmmin || opc1 != opc1min || opc2 != opc2min) {
+ r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB;
+ }
+
+ /* Overwrite CP_ANY with the instantiation. */
+ r2->crm = crm;
+ r2->opc1 = opc1;
+ r2->opc2 = opc2;
+
+ switch (r->state) {
+ case ARM_CP_STATE_AA32:
+ add_cpreg_to_hashtable_aa32(cpu, r2);
+ break;
+ case ARM_CP_STATE_AA64:
+ add_cpreg_to_hashtable_aa64(cpu, r2);
+ break;
+ case ARM_CP_STATE_BOTH:
+ r3 = alloc_cpreg(r2, NULL);
+ r2->cp = cp;
+ add_cpreg_to_hashtable_aa32(cpu, r2);
+ r3->cp = 0;
+ add_cpreg_to_hashtable_aa64(cpu, r3);
+ break;
+ default:
+ g_assert_not_reached();
}
}
}
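The loops above expand every CP_ANY wildcard into concrete (crm, opc1, opc2) triples, and only the first triple stays migratable. A compact sketch of that convention:

    #include <stdio.h>

    #define CP_ANY 0xff

    static void expand(int crm, int opc1, int opc2)
    {
        int cmin = (crm == CP_ANY) ? 0 : crm,   cmax = (crm == CP_ANY) ? 15 : crm;
        int amin = (opc1 == CP_ANY) ? 0 : opc1, amax = (opc1 == CP_ANY) ? 7 : opc1;
        int bmin = (opc2 == CP_ANY) ? 0 : opc2, bmax = (opc2 == CP_ANY) ? 7 : opc2;

        for (int c = cmin; c <= cmax; c++) {
            for (int a = amin; a <= amax; a++) {
                for (int b = bmin; b <= bmax; b++) {
                    /* everything after the first instance is an alias */
                    int alias = (c != cmin || a != amin || b != bmin);
                    printf("crm=%d opc1=%d opc2=%d%s\n", c, a, b,
                           alias ? "  (ALIAS|NO_GDB)" : "");
                }
            }
        }
    }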
@@ -9435,12 +7938,10 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
}
/* Define a whole list of registers */
-void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs,
- void *opaque, size_t len)
+void define_arm_cp_regs_len(ARMCPU *cpu, const ARMCPRegInfo *regs, size_t len)
{
- size_t i;
- for (i = 0; i < len; ++i) {
- define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque);
+ for (size_t i = 0; i < len; ++i) {
+ define_one_arm_cp_reg(cpu, regs + i);
}
}
@@ -9502,7 +8003,7 @@ uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri)
return 0;
}
-void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque)
+void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *ri)
{
/* Helper coprocessor reset function for do-nothing-on-reset registers */
}
@@ -10569,7 +9070,7 @@ static int aarch64_regnum(CPUARMState *env, int aarch32_reg)
}
}
-static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
+uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
{
uint32_t ret = cpsr_read(env);
@@ -10584,6 +9085,24 @@ static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env)
return ret;
}
+void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val)
+{
+ uint32_t mask;
+
+ /* Save SPSR_ELx.SS into PSTATE. */
+ env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
+ val &= ~PSTATE_SS;
+
+ /* Move DIT to the correct location for CPSR */
+ if (val & PSTATE_DIT) {
+ val &= ~PSTATE_DIT;
+ val |= CPSR_DIT;
+ }
+
+ mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar);
+ cpsr_write(env, val, mask, CPSRWriteRaw);
+}
+
static bool syndrome_is_sync_extabt(uint32_t syndrome)
{
/* Return true if this syndrome value is a synchronous external abort */
@@ -10615,9 +9134,9 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
unsigned int new_el = env->exception.target_el;
- target_ulong addr = env->cp15.vbar_el[new_el];
- unsigned int new_mode = aarch64_pstate_mode(new_el, true);
- unsigned int old_mode;
+ vaddr addr = env->cp15.vbar_el[new_el];
+ uint64_t new_mode = aarch64_pstate_mode(new_el, true);
+ uint64_t old_mode;
unsigned int cur_el = arm_current_el(env);
int rt;
@@ -10660,8 +9179,13 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
} else {
addr += 0x600;
}
- } else if (pstate_read(env) & PSTATE_SP) {
- addr += 0x200;
+ } else {
+ if (pstate_read(env) & PSTATE_SP) {
+ addr += 0x200;
+ }
+ if (is_a64(env) && (env->cp15.gcscr_el[new_el] & GCSCR_EXLOCKEN)) {
+ new_mode |= PSTATE_EXLOCK;
+ }
}
switch (cs->exception_index) {
@@ -10765,7 +9289,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
* If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN)
* If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM)
*/
- old_mode = deposit32(old_mode, 2, 2, 2);
+ old_mode = deposit64(old_mode, 2, 2, 2);
}
}
} else {
@@ -10778,7 +9302,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
}
env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode;
- qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode);
+ qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%" PRIx64 "\n", old_mode);
qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
env->elr_el[new_el]);
@@ -10832,7 +9356,8 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
env->pc = addr;
- qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
+ qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64
+ " PSTATE 0x%" PRIx64 "\n",
new_el, env->pc, pstate_read(env));
}
@@ -10888,7 +9413,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
new_el);
if (qemu_loglevel_mask(CPU_LOG_INT)
&& !excp_is_internal(cs->exception_index)) {
- qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n",
+ qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx64 "\n",
syn_get_ec(env->exception.syndrome),
env->exception.syndrome);
}
@@ -10930,7 +9455,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
arm_call_el_change_hook(cpu);
if (!kvm_enabled()) {
- cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+ cpu_set_interrupt(cs, CPU_INTERRUPT_EXITTB);
}
}
#endif /* !CONFIG_USER_ONLY */
@@ -11078,21 +9603,34 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
bool el1_is_aa32)
{
uint64_t tcr = regime_tcr(env, mmu_idx);
- bool epd, hpd, tsz_oob, ds, ha, hd;
+ bool epd, hpd, tsz_oob, ds, ha, hd, pie = false;
int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
ARMGranuleSize gran;
ARMCPU *cpu = env_archcpu(env);
bool stage2 = regime_is_stage2(mmu_idx);
+ int r_el = regime_el(mmu_idx);
if (!regime_has_2_ranges(mmu_idx)) {
select = 0;
tsz = extract32(tcr, 0, 6);
gran = tg0_to_gran_size(extract32(tcr, 14, 2));
if (stage2) {
- /* VTCR_EL2 */
- hpd = false;
+ /*
+ * Stage 2 does not have hierarchical permissions, so treat
+ * them as disabled here; this simplifies the page table walk.
+ */
+ hpd = true;
+ pie = extract64(tcr, 36, 1) && cpu_isar_feature(aa64_s2pie, cpu);
} else {
hpd = extract32(tcr, 24, 1);
+ if (r_el == 3) {
+ pie = (extract64(tcr, 35, 1)
+ && cpu_isar_feature(aa64_s1pie, cpu));
+ } else {
+ pie = ((env->cp15.tcr2_el[2] & TCR2_PIE)
+ && (!arm_feature(env, ARM_FEATURE_EL3)
+ || (env->cp15.scr_el3 & SCR_TCR2EN)));
+ }
}
epd = false;
sh = extract32(tcr, 12, 2);
@@ -11129,10 +9667,16 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
ds = extract64(tcr, 59, 1);
if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) &&
- regime_is_user(env, mmu_idx)) {
+ regime_is_user(mmu_idx)) {
epd = true;
}
+
+ pie = ((env->cp15.tcr2_el[r_el] & TCR2_PIE)
+ && (!arm_feature(env, ARM_FEATURE_EL3)
+ || (env->cp15.scr_el3 & SCR_TCR2EN))
+ && (r_el == 2 || (arm_hcrx_el2_eff(env) & HCRX_TCR2EN)));
}
+ hpd |= pie;
gran = sanitize_gran_size(cpu, gran, stage2);
@@ -11211,6 +9755,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
.ha = ha,
.hd = ha && hd,
.gran = gran,
+ .pie = pie,
};
}
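The TCR parsing above leans on QEMU's bitfield helpers; as a reminder of the semantics assumed here, extract64(value, start, length) returns the length-bit field starting at bit start:

    #include <stdint.h>
    #include <assert.h>

    static inline uint64_t extract64_sketch(uint64_t value, int start, int length)
    {
        assert(start >= 0 && length > 0 && length <= 64 - start);
        return (value >> start) & (~0ull >> (64 - length));
    }

    /* e.g. tsz = extract64_sketch(tcr, 0, 6); single-range HPD is bit 24 */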
@@ -11325,33 +9870,6 @@ int fp_exception_el(CPUARMState *env, int cur_el)
return 0;
}
-/* Return the exception level we're running at if this is our mmu_idx */
-int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
-{
- if (mmu_idx & ARM_MMU_IDX_M) {
- return mmu_idx & ARM_MMU_IDX_M_PRIV;
- }
-
- switch (mmu_idx) {
- case ARMMMUIdx_E10_0:
- case ARMMMUIdx_E20_0:
- case ARMMMUIdx_E30_0:
- return 0;
- case ARMMMUIdx_E10_1:
- case ARMMMUIdx_E10_1_PAN:
- return 1;
- case ARMMMUIdx_E2:
- case ARMMMUIdx_E20_2:
- case ARMMMUIdx_E20_2_PAN:
- return 2;
- case ARMMMUIdx_E3:
- case ARMMMUIdx_E30_3_PAN:
- return 3;
- default:
- g_assert_not_reached();
- }
-}
-
#ifndef CONFIG_TCG
ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate)
{
@@ -11417,116 +9935,6 @@ ARMMMUIdx arm_mmu_idx(CPUARMState *env)
return arm_mmu_idx_el(env, arm_current_el(env));
}
-static bool mve_no_pred(CPUARMState *env)
-{
- /*
- * Return true if there is definitely no predication of MVE
- * instructions by VPR or LTPSIZE. (Returning false even if there
- * isn't any predication is OK; generated code will just be
- * a little worse.)
- * If the CPU does not implement MVE then this TB flag is always 0.
- *
- * NOTE: if you change this logic, the "recalculate s->mve_no_pred"
- * logic in gen_update_fp_context() needs to be updated to match.
- *
- * We do not include the effect of the ECI bits here -- they are
- * tracked in other TB flags. This simplifies the logic for
- * "when did we emit code that changes the MVE_NO_PRED TB flag
- * and thus need to end the TB?".
- */
- if (cpu_isar_feature(aa32_mve, env_archcpu(env))) {
- return false;
- }
- if (env->v7m.vpr) {
- return false;
- }
- if (env->v7m.ltpsize < 4) {
- return false;
- }
- return true;
-}
-
-void cpu_get_tb_cpu_state(CPUARMState *env, vaddr *pc,
- uint64_t *cs_base, uint32_t *pflags)
-{
- CPUARMTBFlags flags;
-
- assert_hflags_rebuild_correctly(env);
- flags = env->hflags;
-
- if (EX_TBFLAG_ANY(flags, AARCH64_STATE)) {
- *pc = env->pc;
- if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
- DP_TBFLAG_A64(flags, BTYPE, env->btype);
- }
- } else {
- *pc = env->regs[15];
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
- FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
- != env->v7m.secure) {
- DP_TBFLAG_M32(flags, FPCCR_S_WRONG, 1);
- }
-
- if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
- (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
- (env->v7m.secure &&
- !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
- /*
- * ASPEN is set, but FPCA/SFPA indicate that there is no
- * active FP context; we must create a new FP context before
- * executing any FP insn.
- */
- DP_TBFLAG_M32(flags, NEW_FP_CTXT_NEEDED, 1);
- }
-
- bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
- if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
- DP_TBFLAG_M32(flags, LSPACT, 1);
- }
-
- if (mve_no_pred(env)) {
- DP_TBFLAG_M32(flags, MVE_NO_PRED, 1);
- }
- } else {
- /*
- * Note that XSCALE_CPAR shares bits with VECSTRIDE.
- * Note that VECLEN+VECSTRIDE are RES0 for M-profile.
- */
- if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar);
- } else {
- DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len);
- DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride);
- }
- if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) {
- DP_TBFLAG_A32(flags, VFPEN, 1);
- }
- }
-
- DP_TBFLAG_AM32(flags, THUMB, env->thumb);
- DP_TBFLAG_AM32(flags, CONDEXEC, env->condexec_bits);
- }
-
- /*
- * The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
- * states defined in the ARM ARM for software singlestep:
- * SS_ACTIVE PSTATE.SS State
- * 0 x Inactive (the TB flag for SS is always 0)
- * 1 0 Active-pending
- * 1 1 Active-not-pending
- * SS_ACTIVE is set in hflags; PSTATE__SS is computed every TB.
- */
- if (EX_TBFLAG_ANY(flags, SS_ACTIVE) && (env->pstate & PSTATE_SS)) {
- DP_TBFLAG_ANY(flags, PSTATE__SS, 1);
- }
-
- *pflags = flags.flags;
- *cs_base = flags.flags2;
-}
-
-#ifdef TARGET_AARCH64
/*
* The manual says that when SVE is enabled and VQ is widened the
* implementation is allowed to zero the previously inaccessible
@@ -11641,7 +10049,6 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
aarch64_sve_narrow_vq(env, new_len + 1);
}
}
-#endif
#ifndef CONFIG_USER_ONLY
ARMSecuritySpace arm_security_space(CPUARMState *env)