Diffstat (limited to 'target/arm/cpu.c')
-rw-r--r-- target/arm/cpu.c | 737
1 file changed, 202 insertions(+), 535 deletions(-)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 377791c..3b556f1 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -33,7 +33,6 @@
#endif /* CONFIG_TCG */
#include "internals.h"
#include "cpu-features.h"
-#include "exec/exec-all.h"
#include "exec/target_page.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
@@ -53,6 +52,8 @@
#include "target/arm/cpu-qom.h"
#include "target/arm/gtimer.h"
+#include "trace.h"
+
static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -143,11 +144,11 @@ static bool arm_cpu_has_work(CPUState *cs)
ARMCPU *cpu = ARM_CPU(cs);
return (cpu->power_state != PSCI_OFF)
- && cs->interrupt_request &
- (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
- | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VINMI | CPU_INTERRUPT_VFNMI
- | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
- | CPU_INTERRUPT_EXITTB);
+ && cpu_test_interrupt(cs,
+ CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+ | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VINMI | CPU_INTERRUPT_VFNMI
+ | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
+ | CPU_INTERRUPT_EXITTB);
}
#endif /* !CONFIG_USER_ONLY */
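
The hunk above swaps an open-coded read of cs->interrupt_request for the cpu_test_interrupt() accessor. As a minimal sketch, assuming the helper simply wraps an atomic load of the interrupt_request word (the authoritative definition lives in QEMU's core CPU headers, not in this patch):

    static inline bool cpu_test_interrupt(CPUState *cpu, int mask)
    {
        /* Acquire load: interrupt flags raised by another thread are
         * observed before any state that depends on them. */
        return (qatomic_load_acquire(&cpu->interrupt_request) & mask) != 0;
    }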
@@ -193,14 +194,8 @@ static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
* This is basically only used for fields in non-core coprocessors
* (like the pxa2xx ones).
*/
- if (!ri->fieldoffset) {
- return;
- }
-
- if (cpreg_field_is_64bit(ri)) {
- CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
- } else {
- CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
+ if (ri->fieldoffset) {
+ raw_write(&cpu->env, ri, ri->resetvalue);
}
}
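
cp_reg_reset() now defers the width dispatch to raw_write(). A sketch of that helper's shape, assuming it follows the raw accessor pattern used elsewhere in target/arm, where CPREG_FIELD64/CPREG_FIELD32 resolve ri->fieldoffset into CPU state:

    static void raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          uint64_t value)
    {
        assert(ri->fieldoffset);
        if (cpreg_field_is_64bit(ri)) {
            CPREG_FIELD64(env, ri) = value;
        } else {
            CPREG_FIELD32(env, ri) = value;
        }
    }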
@@ -232,6 +227,8 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);
CPUARMState *env = &cpu->env;
+ trace_arm_cpu_reset(arm_cpu_mp_affinity(cpu));
+
if (acc->parent_phases.hold) {
acc->parent_phases.hold(obj, type);
}
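
The new trace_arm_cpu_reset() call here (and trace_arm_emulate_firmware_reset() below) implies matching stanzas in target/arm/trace-events. A plausible pair, inferred from the call sites rather than quoted from the tree:

    # target/arm/trace-events (sketch)
    arm_cpu_reset(uint64_t mp_aff) "cpu 0x%" PRIx64
    arm_emulate_firmware_reset(uint64_t mp_aff, int target_el) "cpu 0x%" PRIx64 " @ EL%d"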
@@ -248,10 +245,6 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
cpu->power_state = cs->start_powered_off ? PSCI_OFF : PSCI_ON;
- if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
- env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
- }
-
if (arm_feature(env, ARM_FEATURE_AARCH64)) {
/* 64 bit CPUs always start in 64 bit mode */
env->aarch64 = true;
@@ -318,6 +311,10 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
env->cp15.mdscr_el1 |= 1 << 12;
/* Enable FEAT_MOPS */
env->cp15.sctlr_el[1] |= SCTLR_MSCEN;
+ /* For Linux, GCSPR_EL0 is always readable. */
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ env->cp15.gcscr_el[0] = GCSCRE0_NTR;
+ }
#else
/* Reset into the highest available EL */
if (arm_feature(env, ARM_FEATURE_EL3)) {
@@ -350,11 +347,6 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
env->uncached_cpsr = ARM_CPU_MODE_USR;
/* For user mode we must enable access to coprocessors */
env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
- if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
- env->cp15.c15_cpar = 3;
- } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
- env->cp15.c15_cpar = 1;
- }
#else
/*
@@ -554,11 +546,15 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type)
set_flush_inputs_to_zero(1, &env->vfp.fp_status[FPST_STD]);
set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD]);
set_default_nan_mode(1, &env->vfp.fp_status[FPST_STD_F16]);
+ set_default_nan_mode(1, &env->vfp.fp_status[FPST_ZA]);
+ set_default_nan_mode(1, &env->vfp.fp_status[FPST_ZA_F16]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A32_F16]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_A64_F16]);
+ arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_ZA_F16]);
arm_set_default_fp_behaviours(&env->vfp.fp_status[FPST_STD_F16]);
arm_set_ah_fp_behaviours(&env->vfp.fp_status[FPST_AH]);
set_flush_to_zero(1, &env->vfp.fp_status[FPST_AH]);
@@ -586,6 +582,8 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
bool have_el3 = arm_feature(env, ARM_FEATURE_EL3);
bool have_el2 = arm_feature(env, ARM_FEATURE_EL2);
+ trace_arm_emulate_firmware_reset(arm_cpu_mp_affinity(cpu), target_el);
+
/*
* Check we have the EL we're aiming for. If that is the
* highest implemented EL, then cpu_reset has already done
@@ -631,6 +629,9 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
env->cp15.scr_el3 |= SCR_ENTP2;
env->vfp.smcr_el[3] = 0xf;
+ if (cpu_isar_feature(aa64_sme2, cpu)) {
+ env->vfp.smcr_el[3] |= R_SMCR_EZT0_MASK;
+ }
}
if (cpu_isar_feature(aa64_hcx, cpu)) {
env->cp15.scr_el3 |= SCR_HXEN;
@@ -638,6 +639,22 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
if (cpu_isar_feature(aa64_fgt, cpu)) {
env->cp15.scr_el3 |= SCR_FGTEN;
}
+ if (cpu_isar_feature(aa64_gcs, cpu)) {
+ env->cp15.scr_el3 |= SCR_GCSEN;
+ }
+ if (cpu_isar_feature(aa64_tcr2, cpu)) {
+ env->cp15.scr_el3 |= SCR_TCR2EN;
+ }
+ if (cpu_isar_feature(aa64_sctlr2, cpu)) {
+ env->cp15.scr_el3 |= SCR_SCTLR2EN;
+ }
+ if (cpu_isar_feature(aa64_s1pie, cpu) ||
+ cpu_isar_feature(aa64_s2pie, cpu)) {
+ env->cp15.scr_el3 |= SCR_PIEN;
+ }
+ if (cpu_isar_feature(aa64_mec, cpu)) {
+ env->cp15.scr_el3 |= SCR_MECEN;
+ }
}
if (target_el == 2) {
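
Each new SCR_EL3 enable bit above is gated on a cpu_isar_feature() predicate. These are thin inline tests over the ID registers; one such predicate might look like the following sketch (the exact field name is an assumption, based on FEAT_GCS being advertised via ID_AA64PFR1_EL1):

    static inline bool isar_feature_aa64_gcs(const ARMISARegisters *id)
    {
        return FIELD_EX64_IDREG(id, ID_AA64PFR1, GCS) != 0;
    }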
@@ -674,376 +691,6 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
}
-#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
-
-static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
- unsigned int target_el,
- unsigned int cur_el, bool secure,
- uint64_t hcr_el2)
-{
- CPUARMState *env = cpu_env(cs);
- bool pstate_unmasked;
- bool unmasked = false;
- bool allIntMask = false;
-
- /*
- * Don't take exceptions if they target a lower EL.
- * This check should catch any exceptions that would not be taken
- * but left pending.
- */
- if (cur_el > target_el) {
- return false;
- }
-
- if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
- env->cp15.sctlr_el[target_el] & SCTLR_NMI && cur_el == target_el) {
- allIntMask = env->pstate & PSTATE_ALLINT ||
- ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) &&
- (env->pstate & PSTATE_SP));
- }
-
- switch (excp_idx) {
- case EXCP_NMI:
- pstate_unmasked = !allIntMask;
- break;
-
- case EXCP_VINMI:
- if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
- /* VINMIs are only taken when hypervized. */
- return false;
- }
- return !allIntMask;
- case EXCP_VFNMI:
- if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
- /* VFNMIs are only taken when hypervized. */
- return false;
- }
- return !allIntMask;
- case EXCP_FIQ:
- pstate_unmasked = (!(env->daif & PSTATE_F)) && (!allIntMask);
- break;
-
- case EXCP_IRQ:
- pstate_unmasked = (!(env->daif & PSTATE_I)) && (!allIntMask);
- break;
-
- case EXCP_VFIQ:
- if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
- /* VFIQs are only taken when hypervized. */
- return false;
- }
- return !(env->daif & PSTATE_F) && (!allIntMask);
- case EXCP_VIRQ:
- if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
- /* VIRQs are only taken when hypervized. */
- return false;
- }
- return !(env->daif & PSTATE_I) && (!allIntMask);
- case EXCP_VSERR:
- if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) {
- /* VSErrs are only taken when hypervized. */
- return false;
- }
- return !(env->daif & PSTATE_A);
- default:
- g_assert_not_reached();
- }
-
- /*
- * Use the target EL, current execution state and SCR/HCR settings to
- * determine whether the corresponding CPSR bit is used to mask the
- * interrupt.
- */
- if ((target_el > cur_el) && (target_el != 1)) {
- /* Exceptions targeting a higher EL may not be maskable */
- if (arm_feature(env, ARM_FEATURE_AARCH64)) {
- switch (target_el) {
- case 2:
- /*
- * According to ARM DDI 0487H.a, an interrupt can be masked
- * when HCR_E2H and HCR_TGE are both set regardless of the
- * current Security state. Note that we need to revisit this
- * part again once we need to support NMI.
- */
- if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
- unmasked = true;
- }
- break;
- case 3:
- /* Interrupt cannot be masked when the target EL is 3 */
- unmasked = true;
- break;
- default:
- g_assert_not_reached();
- }
- } else {
- /*
- * The old 32-bit-only environment has a more complicated
- * masking setup. HCR and SCR bits not only affect interrupt
- * routing but also change the behaviour of masking.
- */
- bool hcr, scr;
-
- switch (excp_idx) {
- case EXCP_FIQ:
- /*
- * If FIQs are routed to EL3 or EL2 then there are cases where
- * we override the CPSR.F in determining if the exception is
- * masked or not. If neither of these are set then we fall back
- * to the CPSR.F setting otherwise we further assess the state
- * below.
- */
- hcr = hcr_el2 & HCR_FMO;
- scr = (env->cp15.scr_el3 & SCR_FIQ);
-
- /*
- * When EL3 is 32-bit, the SCR.FW bit controls whether the
- * CPSR.F bit masks FIQ interrupts when taken in non-secure
- * state. If SCR.FW is set then FIQs can be masked by CPSR.F
- * when non-secure but only when FIQs are only routed to EL3.
- */
- scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
- break;
- case EXCP_IRQ:
- /*
- * When EL3 execution state is 32-bit, if HCR.IMO is set then
- * we may override the CPSR.I masking when in non-secure state.
- * The SCR.IRQ setting has already been taken into consideration
- * when setting the target EL, so it does not have a further
- * affect here.
- */
- hcr = hcr_el2 & HCR_IMO;
- scr = false;
- break;
- default:
- g_assert_not_reached();
- }
-
- if ((scr || hcr) && !secure) {
- unmasked = true;
- }
- }
- }
-
- /*
- * The PSTATE bits only mask the interrupt if we have not overridden the
- * ability above.
- */
- return unmasked || pstate_unmasked;
-}
-
-static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
-{
- CPUARMState *env = cpu_env(cs);
- uint32_t cur_el = arm_current_el(env);
- bool secure = arm_is_secure(env);
- uint64_t hcr_el2 = arm_hcr_el2_eff(env);
- uint32_t target_el;
- uint32_t excp_idx;
-
- /* The prioritization of interrupts is IMPLEMENTATION DEFINED. */
-
- if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) &&
- (arm_sctlr(env, cur_el) & SCTLR_NMI)) {
- if (interrupt_request & CPU_INTERRUPT_NMI) {
- excp_idx = EXCP_NMI;
- target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
- if (arm_excp_unmasked(cs, excp_idx, target_el,
- cur_el, secure, hcr_el2)) {
- goto found;
- }
- }
- if (interrupt_request & CPU_INTERRUPT_VINMI) {
- excp_idx = EXCP_VINMI;
- target_el = 1;
- if (arm_excp_unmasked(cs, excp_idx, target_el,
- cur_el, secure, hcr_el2)) {
- goto found;
- }
- }
- if (interrupt_request & CPU_INTERRUPT_VFNMI) {
- excp_idx = EXCP_VFNMI;
- target_el = 1;
- if (arm_excp_unmasked(cs, excp_idx, target_el,
- cur_el, secure, hcr_el2)) {
- goto found;
- }
- }
- } else {
- /*
- * NMI disabled: interrupts with superpriority are handled
- * as if they didn't have it
- */
- if (interrupt_request & CPU_INTERRUPT_NMI) {
- interrupt_request |= CPU_INTERRUPT_HARD;
- }
- if (interrupt_request & CPU_INTERRUPT_VINMI) {
- interrupt_request |= CPU_INTERRUPT_VIRQ;
- }
- if (interrupt_request & CPU_INTERRUPT_VFNMI) {
- interrupt_request |= CPU_INTERRUPT_VFIQ;
- }
- }
-
- if (interrupt_request & CPU_INTERRUPT_FIQ) {
- excp_idx = EXCP_FIQ;
- target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
- if (arm_excp_unmasked(cs, excp_idx, target_el,
- cur_el, secure, hcr_el2)) {
- goto found;
- }
- }
- if (interrupt_request & CPU_INTERRUPT_HARD) {
- excp_idx = EXCP_IRQ;
- target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
- if (arm_excp_unmasked(cs, excp_idx, target_el,
- cur_el, secure, hcr_el2)) {
- goto found;
- }
- }
- if (interrupt_request & CPU_INTERRUPT_VIRQ) {
- excp_idx = EXCP_VIRQ;
- target_el = 1;
- if (arm_excp_unmasked(cs, excp_idx, target_el,
- cur_el, secure, hcr_el2)) {
- goto found;
- }
- }
- if (interrupt_request & CPU_INTERRUPT_VFIQ) {
- excp_idx = EXCP_VFIQ;
- target_el = 1;
- if (arm_excp_unmasked(cs, excp_idx, target_el,
- cur_el, secure, hcr_el2)) {
- goto found;
- }
- }
- if (interrupt_request & CPU_INTERRUPT_VSERR) {
- excp_idx = EXCP_VSERR;
- target_el = 1;
- if (arm_excp_unmasked(cs, excp_idx, target_el,
- cur_el, secure, hcr_el2)) {
- /* Taking a virtual abort clears HCR_EL2.VSE */
- env->cp15.hcr_el2 &= ~HCR_VSE;
- cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
- goto found;
- }
- }
- return false;
-
- found:
- cs->exception_index = excp_idx;
- env->exception.target_el = target_el;
- cs->cc->tcg_ops->do_interrupt(cs);
- return true;
-}
-
-#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
-
-void arm_cpu_update_virq(ARMCPU *cpu)
-{
- /*
- * Update the interrupt level for VIRQ, which is the logical OR of
- * the HCR_EL2.VI bit and the input line level from the GIC.
- */
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
-
- bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
- !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
- (env->irq_line_state & CPU_INTERRUPT_VIRQ);
-
- if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
- if (new_state) {
- cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
- }
- }
-}
-
-void arm_cpu_update_vfiq(ARMCPU *cpu)
-{
- /*
- * Update the interrupt level for VFIQ, which is the logical OR of
- * the HCR_EL2.VF bit and the input line level from the GIC.
- */
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
-
- bool new_state = ((arm_hcr_el2_eff(env) & HCR_VF) &&
- !(arm_hcrx_el2_eff(env) & HCRX_VFNMI)) ||
- (env->irq_line_state & CPU_INTERRUPT_VFIQ);
-
- if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
- if (new_state) {
- cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
- }
- }
-}
-
-void arm_cpu_update_vinmi(ARMCPU *cpu)
-{
- /*
- * Update the interrupt level for VINMI, which is the logical OR of
- * the HCRX_EL2.VINMI bit and the input line level from the GIC.
- */
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
-
- bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) &&
- (arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
- (env->irq_line_state & CPU_INTERRUPT_VINMI);
-
- if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VINMI) != 0)) {
- if (new_state) {
- cpu_interrupt(cs, CPU_INTERRUPT_VINMI);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_VINMI);
- }
- }
-}
-
-void arm_cpu_update_vfnmi(ARMCPU *cpu)
-{
- /*
- * Update the interrupt level for VFNMI, which is the HCRX_EL2.VFNMI bit.
- */
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
-
- bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) &&
- (arm_hcrx_el2_eff(env) & HCRX_VFNMI);
-
- if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFNMI) != 0)) {
- if (new_state) {
- cpu_interrupt(cs, CPU_INTERRUPT_VFNMI);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_VFNMI);
- }
- }
-}
-
-void arm_cpu_update_vserr(ARMCPU *cpu)
-{
- /*
- * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit.
- */
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
-
- bool new_state = env->cp15.hcr_el2 & HCR_VSE;
-
- if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
- if (new_state) {
- cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
- } else {
- cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR);
- }
- }
-}
-
#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
@@ -1099,37 +746,6 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
}
}
-static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
-{
-#ifdef CONFIG_KVM
- ARMCPU *cpu = opaque;
- CPUARMState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
- uint32_t linestate_bit;
- int irq_id;
-
- switch (irq) {
- case ARM_CPU_IRQ:
- irq_id = KVM_ARM_IRQ_CPU_IRQ;
- linestate_bit = CPU_INTERRUPT_HARD;
- break;
- case ARM_CPU_FIQ:
- irq_id = KVM_ARM_IRQ_CPU_FIQ;
- linestate_bit = CPU_INTERRUPT_FIQ;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (level) {
- env->irq_line_state |= linestate_bit;
- } else {
- env->irq_line_state &= ~linestate_bit;
- }
- kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
-#endif
-}
-
static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1203,7 +819,7 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
info->endian = BFD_ENDIAN_LITTLE;
if (bswap_code(sctlr_b)) {
- info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
+ info->endian = target_big_endian() ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
}
info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
@@ -1213,13 +829,11 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
#endif
}
-#ifdef TARGET_AARCH64
-
static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
- uint32_t psr = pstate_read(env);
+ uint64_t psr = pstate_read(env);
int i, j;
int el = arm_current_el(env);
uint64_t hcr = arm_hcr_el2_eff(env);
@@ -1241,7 +855,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
} else {
ns_status = "";
}
- qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c",
+ qemu_fprintf(f, "PSTATE=%016" PRIx64 " %c%c%c%c %sEL%d%c",
psr,
psr & PSTATE_N ? 'N' : '-',
psr & PSTATE_Z ? 'Z' : '-',
@@ -1258,7 +872,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
(FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-'));
}
if (cpu_isar_feature(aa64_bti, cpu)) {
- qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10);
+ qemu_fprintf(f, " BTYPE=%d", (int)(psr & PSTATE_BTYPE) >> 10);
}
qemu_fprintf(f, "%s%s%s",
(hcr & HCR_NV) ? " NV" : "",
@@ -1364,23 +978,14 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
qemu_fprintf(f, "ZA[%0*d]=", svl_lg10, i);
for (j = zcr_len; j >= 0; --j) {
qemu_fprintf(f, "%016" PRIx64 ":%016" PRIx64 "%c",
- env->zarray[i].d[2 * j + 1],
- env->zarray[i].d[2 * j],
+ env->za_state.za[i].d[2 * j + 1],
+ env->za_state.za[i].d[2 * j],
j ? ':' : '\n');
}
}
}
}
-#else
-
-static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
- g_assert_not_reached();
-}
-
-#endif
-
static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -1542,6 +1147,7 @@ static void arm_cpu_initfn(Object *obj)
* 0 means "unset, use the default value". That default might vary depending
* on the CPU type, and is set in the realize fn.
*/
+#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_gt_cntfrq_property =
DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz, 0);
@@ -1551,7 +1157,6 @@ static const Property arm_cpu_reset_cbar_property =
static const Property arm_cpu_reset_hivecs_property =
DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
-#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_has_el2_property =
DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
@@ -1574,6 +1179,7 @@ static const Property arm_cpu_has_neon_property =
static const Property arm_cpu_has_dsp_property =
DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
+#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_has_mpu_property =
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
@@ -1586,6 +1192,7 @@ static const Property arm_cpu_pmsav7_dregion_property =
DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
pmsav7_dregion,
qdev_prop_uint32, uint32_t);
+#endif
static bool arm_get_pmu(Object *obj, Error **errp)
{
@@ -1610,6 +1217,35 @@ static void arm_set_pmu(Object *obj, bool value, Error **errp)
cpu->has_pmu = value;
}
+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+ ARMCPU *cpu = ARM_CPU(obj);
+
+ /*
+ * At this time, this property is only allowed if KVM is enabled. This
+ * restriction allows us to avoid fixing up functionality that assumes a
+ * uniform execution state like do_interrupt.
+ */
+ if (value == false) {
+ if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
+ error_setg(errp, "'aarch64' feature cannot be disabled "
+ "unless KVM is enabled and 32-bit EL1 "
+ "is supported");
+ return;
+ }
+ unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ } else {
+ set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+ }
+}
+
unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
{
/*
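
With the 'aarch64' property now registered from common code, the KVM-only use case is unchanged: a host that supports 32-bit EL1 can still boot a guest in AArch32 state, e.g.:

    qemu-system-aarch64 -M virt -enable-kvm -cpu host,aarch64=off ...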
@@ -1726,7 +1362,7 @@ static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
}
}
-void arm_cpu_post_init(Object *obj)
+static void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -1737,6 +1373,14 @@ void arm_cpu_post_init(Object *obj)
*/
arm_cpu_propagate_feature_implications(cpu);
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
+ aarch64_cpu_set_aarch64);
+ object_property_set_description(obj, "aarch64",
+ "Set on/off to enable/disable aarch64 "
+ "execution state ");
+ }
+#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
@@ -1752,7 +1396,6 @@ void arm_cpu_post_init(Object *obj)
OBJ_PROP_FLAG_READWRITE);
}
-#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
/* Add the has_el3 state CPU property only if EL3 is allowed. This will
* prevent "has_el3" from existing on CPUs which cannot support EL3.
@@ -1824,6 +1467,7 @@ void arm_cpu_post_init(Object *obj)
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
}
+#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
@@ -1860,8 +1504,6 @@ void arm_cpu_post_init(Object *obj)
&cpu->psci_conduit,
OBJ_PROP_FLAG_READWRITE);
- qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
-
if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
}
@@ -1870,7 +1512,6 @@ void arm_cpu_post_init(Object *obj)
kvm_arm_add_vcpu_properties(cpu);
}
-#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
cpu_isar_feature(aa64_mte, cpu)) {
object_property_add_link(obj, "tag-memory",
@@ -1888,6 +1529,7 @@ void arm_cpu_post_init(Object *obj)
}
}
#endif
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
}
static void arm_cpu_finalizefn(Object *obj)
@@ -1919,7 +1561,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
{
Error *local_err = NULL;
-#ifdef TARGET_AARCH64
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
arm_cpu_sve_finalize(cpu, &local_err);
if (local_err != NULL) {
@@ -1955,7 +1596,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
return;
}
}
-#endif
if (kvm_enabled()) {
kvm_arm_steal_time_finalize(cpu, &local_err);
@@ -1970,6 +1610,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
ARMCPU *cpu = ARM_CPU(dev);
+ ARMISARegisters *isar = &cpu->isar;
ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
CPUARMState *env = &cpu->env;
Error *local_err = NULL;
@@ -2127,21 +1768,16 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_vfp) {
- uint64_t t;
uint32_t u;
- t = cpu->isar.id_aa64isar1;
- t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
- cpu->isar.id_aa64isar1 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR1, JSCVT, 0);
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, FP, 0xf);
- u = cpu->isar.id_isar6;
+ u = GET_IDREG(isar, ID_ISAR6);
u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
- cpu->isar.id_isar6 = u;
+ SET_IDREG(isar, ID_ISAR6, u);
u = cpu->isar.mvfr0;
u = FIELD_DP32(u, MVFR0, FPSP, 0);
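
From here on, the realize-time ID register edits go through GET_IDREG/SET_IDREG and the FIELD_DP*_IDREG shorthands instead of naming each cpu->isar.id_* field. Their plausible shape, inferred purely from the usage in this diff (the idregs array and index naming are assumptions, not quotes of the real header):

    /* Sketch: ID registers held in one array, indexed per register. */
    #define GET_IDREG(isar, ID)     ((isar)->idregs[ID ## _EL1_IDX])
    #define SET_IDREG(isar, ID, v)  ((isar)->idregs[ID ## _EL1_IDX] = (v))
    #define FIELD_DP64_IDREG(isar, ID, field, val) \
        SET_IDREG(isar, ID, FIELD_DP64(GET_IDREG(isar, ID), ID, field, val))
    #define FIELD_DP32_IDREG(isar, ID, field, val) \
        SET_IDREG(isar, ID, FIELD_DP32(GET_IDREG(isar, ID), ID, field, val))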
@@ -2175,7 +1811,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
unset_feature(env, ARM_FEATURE_NEON);
- t = cpu->isar.id_aa64isar0;
+ t = GET_IDREG(isar, ID_AA64ISAR0);
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
@@ -2183,32 +1819,30 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
- cpu->isar.id_aa64isar0 = t;
+ SET_IDREG(isar, ID_AA64ISAR0, t);
- t = cpu->isar.id_aa64isar1;
+ t = GET_IDREG(isar, ID_AA64ISAR1);
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
- cpu->isar.id_aa64isar1 = t;
+ SET_IDREG(isar, ID_AA64ISAR1, t);
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, ADVSIMD, 0xf);
- u = cpu->isar.id_isar5;
+ u = GET_IDREG(isar, ID_ISAR5);
u = FIELD_DP32(u, ID_ISAR5, AES, 0);
u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
- cpu->isar.id_isar5 = u;
+ SET_IDREG(isar, ID_ISAR5, u);
- u = cpu->isar.id_isar6;
+ u = GET_IDREG(isar, ID_ISAR6);
u = FIELD_DP32(u, ID_ISAR6, DP, 0);
u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
- cpu->isar.id_isar6 = u;
+ SET_IDREG(isar, ID_ISAR6, u);
if (!arm_feature(env, ARM_FEATURE_M)) {
u = cpu->isar.mvfr1;
@@ -2225,16 +1859,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_neon && !cpu->has_vfp) {
- uint64_t t;
uint32_t u;
- t = cpu->isar.id_aa64isar0;
- t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
- cpu->isar.id_aa64isar0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR0, FHM, 0);
- t = cpu->isar.id_aa64isar1;
- t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
- cpu->isar.id_aa64isar1 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR1, FRINTTS, 0);
u = cpu->isar.mvfr0;
u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
@@ -2251,30 +1880,20 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
unset_feature(env, ARM_FEATURE_THUMB_DSP);
- u = cpu->isar.id_isar1;
- u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
- cpu->isar.id_isar1 = u;
+ FIELD_DP32_IDREG(isar, ID_ISAR1, EXTEND, 1);
- u = cpu->isar.id_isar2;
+ u = GET_IDREG(isar, ID_ISAR2);
u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
- cpu->isar.id_isar2 = u;
+ SET_IDREG(isar, ID_ISAR2, u);
- u = cpu->isar.id_isar3;
+ u = GET_IDREG(isar, ID_ISAR3);
u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
- cpu->isar.id_isar3 = u;
+ SET_IDREG(isar, ID_ISAR3, u);
}
- /*
- * We rely on no XScale CPU having VFP so we can use the same bits in the
- * TB flags field for VECSTRIDE and XSCALE_CPAR.
- */
- assert(arm_feature(env, ARM_FEATURE_AARCH64) ||
- !cpu_isar_feature(aa32_vfp_simd, cpu) ||
- !arm_feature(env, ARM_FEATURE_XSCALE));
-
#ifndef CONFIG_USER_ONLY
{
int pagebits;
@@ -2338,14 +1957,12 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Disable the security extension feature bits in the processor
* feature registers as well.
*/
- cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
- cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, EL3, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR1, SECURITY, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, COPSDBG, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL3, 0);
/* Disable the realm management extension, which requires EL3. */
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, RME, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, RME, 0);
}
if (!cpu->has_el2) {
@@ -2368,9 +1985,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu);
#endif
} else {
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
- cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMUVER, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, PERFMON, 0);
cpu->pmceid0 = 0;
cpu->pmceid1 = 0;
}
@@ -2380,10 +1996,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Disable the hypervisor feature bits in the processor feature
* registers if we don't have EL2.
*/
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, EL2, 0);
- cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
- ID_PFR1, VIRTUALIZATION, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL2, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR1, VIRTUALIZATION, 0);
}
if (cpu_isar_feature(aa64_mte, cpu)) {
@@ -2402,8 +2016,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* This matches Cortex-A710 BROADCASTMTE input being LOW.
*/
if (tcg_enabled() && cpu->tag_memory == NULL) {
- cpu->isar.id_aa64pfr1 =
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 1);
}
/*
@@ -2411,7 +2024,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* enabled on the guest (i.e mte=off), clear guest's MTE bits."
*/
if (kvm_enabled() && !cpu->kvm_mte) {
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 0);
}
#endif
}
@@ -2431,32 +2044,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* try to access the non-existent system registers for them.
*/
/* FEAT_SPE (Statistical Profiling Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMSVER, 0);
/* FEAT_TRBE (Trace Buffer Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEBUFFER, 0);
/* FEAT_TRF (Self-hosted Trace Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEFILT, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, TRACEFILT, 0);
/* Trace Macrocell system register access */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEVER, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, COPTRC, 0);
/* Memory mapped trace */
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, MMAPTRC, 0);
/* FEAT_AMU (Activity Monitors Extension) */
- cpu->isar.id_aa64pfr0 =
- FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
- cpu->isar.id_pfr0 =
- FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, AMU, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR0, AMU, 0);
/* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
- cpu->isar.id_aa64pfr0 =
- FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, MPAM, 0);
}
/* MPU can be configured out of a PMSA CPU either by setting has-mpu
@@ -2647,15 +2250,52 @@ static const Property arm_cpu_properties[] = {
static const gchar *arm_gdb_arch_name(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
- return "iwmmxt";
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return "aarch64";
}
return "arm";
}
-#ifndef CONFIG_USER_ONLY
+static const char *arm_gdb_get_core_xml_file(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ if (arm_gdbstub_is_aarch64(cpu)) {
+ return "aarch64-core.xml";
+ }
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ return "arm-m-profile.xml";
+ }
+ return "arm-core.xml";
+}
+
+#ifdef CONFIG_USER_ONLY
+/**
+ * aarch64_untagged_addr:
+ *
+ * Remove any address tag from @x. This is explicitly related to the
+ * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
+ *
+ * There should be a better place to put this, but we need this in
+ * include/accel/tcg/cpu-ldst.h, and not some place linux-user specific.
+ *
+ * Note that arm-*-user will never set tagged_addr_enable.
+ */
+static vaddr aarch64_untagged_addr(CPUState *cs, vaddr x)
+{
+ CPUARMState *env = cpu_env(cs);
+ if (env->tagged_addr_enable) {
+ /*
+ * TBI is enabled for userspace but not kernelspace addresses.
+ * Only clear the tag if bit 55 is clear.
+ */
+ x &= sextract64(x, 0, 56);
+ }
+ return x;
+}
+#else
#include "hw/core/sysemu-cpu-ops.h"
static const struct SysemuCPUOps arm_sysemu_ops = {
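
The tag-stripping trick in aarch64_untagged_addr() deserves a worked example: sextract64(x, 0, 56) sign-extends bit 55 across bits 63:56, so the AND clears the top byte only for user-space (bit-55-clear) addresses and leaves kernel-space addresses untouched. Illustrative values, not part of the patch:

    uint64_t user   = 0x5a00123456789abcULL;  /* tag 0x5a, bit 55 clear */
    uint64_t kernel = 0x5affffff87654321ULL;  /* tag 0x5a, bit 55 set   */
    user   &= sextract64(user,   0, 56);  /* -> 0x0000123456789abc, tag cleared */
    kernel &= sextract64(kernel, 0, 56);  /* -> 0x5affffff87654321, unchanged   */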
@@ -2670,6 +2310,29 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#endif
#ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ /*
+ * The Stage2 and Phys indexes are only used for ptw on arm32,
+ * and all pte's are aligned, so we never produce a wrap for these.
+ * Double check that we're not truncating a 40-bit physical address.
+ */
+ assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK));
+
+ if (!is_a64(cpu_env(cs))) {
+ return (uint32_t)result;
+ }
+
+ /*
+ * TODO: For FEAT_CPA2, decide how we want to resolve
+ * Unpredictable_CPACHECK in AddressIncrement.
+ */
+ return result;
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const TCGCPUOps arm_tcg_ops = {
.mttcg_supported = true,
/* ARM processors have a weak memory model */
@@ -2677,6 +2340,7 @@ static const TCGCPUOps arm_tcg_ops = {
.initialize = arm_translate_init,
.translate_code = arm_translate_code,
+ .get_tb_cpu_state = arm_get_tb_cpu_state,
.synchronize_from_tb = arm_cpu_synchronize_from_tb,
.debug_excp_handler = arm_debug_excp_handler,
.restore_state_to_opc = arm_restore_state_to_opc,
@@ -2685,10 +2349,13 @@ static const TCGCPUOps arm_tcg_ops = {
#ifdef CONFIG_USER_ONLY
.record_sigsegv = arm_cpu_record_sigsegv,
.record_sigbus = arm_cpu_record_sigbus,
+ .untagged_addr = aarch64_untagged_addr,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = aprofile_pointer_wrap,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
+ .cpu_exec_reset = cpu_reset,
.do_interrupt = arm_cpu_do_interrupt,
.do_transaction_failed = arm_cpu_do_transaction_failed,
.do_unaligned_access = arm_cpu_do_unaligned_access,
@@ -2699,7 +2366,7 @@ static const TCGCPUOps arm_tcg_ops = {
};
#endif /* CONFIG_TCG */
-static void arm_cpu_class_init(ObjectClass *oc, void *data)
+static void arm_cpu_class_init(ObjectClass *oc, const void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(acc);
@@ -2724,6 +2391,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->sysemu_ops = &arm_sysemu_ops;
#endif
cc->gdb_arch_name = arm_gdb_arch_name;
+ cc->gdb_get_core_xml_file = arm_gdb_get_core_xml_file;
cc->gdb_stop_before_watchpoint = true;
cc->disas_set_info = arm_disas_set_info;
@@ -2740,13 +2408,12 @@ static void arm_cpu_instance_init(Object *obj)
arm_cpu_post_init(obj);
}
-static void cpu_register_class_init(ObjectClass *oc, void *data)
+static void cpu_register_class_init(ObjectClass *oc, const void *data)
{
ARMCPUClass *acc = ARM_CPU_CLASS(oc);
CPUClass *cc = CPU_CLASS(acc);
acc->info = data;
- cc->gdb_core_xml_file = "arm-core.xml";
if (acc->info->deprecation_note) {
cc->deprecation_note = acc->info->deprecation_note;
}
@@ -2758,7 +2425,7 @@ void arm_cpu_register(const ARMCPUInfo *info)
.parent = TYPE_ARM_CPU,
.instance_init = arm_cpu_instance_init,
.class_init = info->class_init ?: cpu_register_class_init,
- .class_data = (void *)info,
+ .class_data = info,
};
type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);