Diffstat (limited to 'target/arm/cpu.c')
-rw-r--r--  target/arm/cpu.c  150
1 file changed, 73 insertions(+), 77 deletions(-)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index ca5ed78..ebac86f 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1500,6 +1500,7 @@ static void arm_cpu_initfn(Object *obj)
* 0 means "unset, use the default value". That default might vary depending
* on the CPU type, and is set in the realize fn.
*/
+#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_gt_cntfrq_property =
DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz, 0);
@@ -1509,7 +1510,6 @@ static const Property arm_cpu_reset_cbar_property =
static const Property arm_cpu_reset_hivecs_property =
DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);
-#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_has_el2_property =
DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);
@@ -1532,6 +1532,7 @@ static const Property arm_cpu_has_neon_property =
static const Property arm_cpu_has_dsp_property =
DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);
+#ifndef CONFIG_USER_ONLY
static const Property arm_cpu_has_mpu_property =
DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);
@@ -1544,6 +1545,7 @@ static const Property arm_cpu_pmsav7_dregion_property =
DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
pmsav7_dregion,
qdev_prop_uint32, uint32_t);
+#endif
static bool arm_get_pmu(Object *obj, Error **errp)
{
@@ -1713,7 +1715,7 @@ static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
}
}
-void arm_cpu_post_init(Object *obj)
+static void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -1731,6 +1733,7 @@ void arm_cpu_post_init(Object *obj)
"Set on/off to enable/disable aarch64 "
"execution state ");
}
+#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
@@ -1746,7 +1749,6 @@ void arm_cpu_post_init(Object *obj)
OBJ_PROP_FLAG_READWRITE);
}
-#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
/* Add the has_el3 state CPU property only if EL3 is allowed. This will
* prevent "has_el3" from existing on CPUs which cannot support EL3.
@@ -1818,6 +1820,7 @@ void arm_cpu_post_init(Object *obj)
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
}
+#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
@@ -1854,8 +1857,6 @@ void arm_cpu_post_init(Object *obj)
&cpu->psci_conduit,
OBJ_PROP_FLAG_READWRITE);
- qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
-
if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
}
@@ -1864,7 +1865,6 @@ void arm_cpu_post_init(Object *obj)
kvm_arm_add_vcpu_properties(cpu);
}
-#ifndef CONFIG_USER_ONLY
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
cpu_isar_feature(aa64_mte, cpu)) {
object_property_add_link(obj, "tag-memory",
@@ -1882,6 +1882,7 @@ void arm_cpu_post_init(Object *obj)
}
}
#endif
+ qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
}
static void arm_cpu_finalizefn(Object *obj)
@@ -1962,6 +1963,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
ARMCPU *cpu = ARM_CPU(dev);
+ ARMISARegisters *isar = &cpu->isar;
ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
CPUARMState *env = &cpu->env;
Error *local_err = NULL;
@@ -2119,21 +2121,16 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_vfp) {
- uint64_t t;
uint32_t u;
- t = cpu->isar.id_aa64isar1;
- t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
- cpu->isar.id_aa64isar1 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR1, JSCVT, 0);
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, FP, 0xf);
- u = cpu->isar.id_isar6;
+ u = GET_IDREG(isar, ID_ISAR6);
u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
- cpu->isar.id_isar6 = u;
+ SET_IDREG(isar, ID_ISAR6, u);
u = cpu->isar.mvfr0;
u = FIELD_DP32(u, MVFR0, FPSP, 0);
@@ -2167,7 +2164,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
unset_feature(env, ARM_FEATURE_NEON);
- t = cpu->isar.id_aa64isar0;
+ t = GET_IDREG(isar, ID_AA64ISAR0);
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
@@ -2175,32 +2172,30 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
- cpu->isar.id_aa64isar0 = t;
+ SET_IDREG(isar, ID_AA64ISAR0, t);
- t = cpu->isar.id_aa64isar1;
+ t = GET_IDREG(isar, ID_AA64ISAR1);
t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
- cpu->isar.id_aa64isar1 = t;
+ SET_IDREG(isar, ID_AA64ISAR1, t);
- t = cpu->isar.id_aa64pfr0;
- t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
- cpu->isar.id_aa64pfr0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, ADVSIMD, 0xf);
- u = cpu->isar.id_isar5;
+ u = GET_IDREG(isar, ID_ISAR5);
u = FIELD_DP32(u, ID_ISAR5, AES, 0);
u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
- cpu->isar.id_isar5 = u;
+ SET_IDREG(isar, ID_ISAR5, u);
- u = cpu->isar.id_isar6;
+ u = GET_IDREG(isar, ID_ISAR6);
u = FIELD_DP32(u, ID_ISAR6, DP, 0);
u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
- cpu->isar.id_isar6 = u;
+ SET_IDREG(isar, ID_ISAR6, u);
if (!arm_feature(env, ARM_FEATURE_M)) {
u = cpu->isar.mvfr1;
@@ -2217,16 +2212,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
}
if (!cpu->has_neon && !cpu->has_vfp) {
- uint64_t t;
uint32_t u;
- t = cpu->isar.id_aa64isar0;
- t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
- cpu->isar.id_aa64isar0 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR0, FHM, 0);
- t = cpu->isar.id_aa64isar1;
- t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
- cpu->isar.id_aa64isar1 = t;
+ FIELD_DP64_IDREG(isar, ID_AA64ISAR1, FRINTTS, 0);
u = cpu->isar.mvfr0;
u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
@@ -2243,19 +2233,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
unset_feature(env, ARM_FEATURE_THUMB_DSP);
- u = cpu->isar.id_isar1;
- u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
- cpu->isar.id_isar1 = u;
+ FIELD_DP32_IDREG(isar, ID_ISAR1, EXTEND, 1);
- u = cpu->isar.id_isar2;
+ u = GET_IDREG(isar, ID_ISAR2);
u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
- cpu->isar.id_isar2 = u;
+ SET_IDREG(isar, ID_ISAR2, u);
- u = cpu->isar.id_isar3;
+ u = GET_IDREG(isar, ID_ISAR3);
u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
- cpu->isar.id_isar3 = u;
+ SET_IDREG(isar, ID_ISAR3, u);
}
@@ -2330,14 +2318,12 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Disable the security extension feature bits in the processor
* feature registers as well.
*/
- cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
- cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, EL3, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR1, SECURITY, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, COPSDBG, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL3, 0);
/* Disable the realm management extension, which requires EL3. */
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, RME, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, RME, 0);
}
if (!cpu->has_el2) {
@@ -2360,9 +2346,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu);
#endif
} else {
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
- cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMUVER, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, PERFMON, 0);
cpu->pmceid0 = 0;
cpu->pmceid1 = 0;
}
@@ -2372,10 +2357,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* Disable the hypervisor feature bits in the processor feature
* registers if we don't have EL2.
*/
- cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
- ID_AA64PFR0, EL2, 0);
- cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
- ID_PFR1, VIRTUALIZATION, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL2, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR1, VIRTUALIZATION, 0);
}
if (cpu_isar_feature(aa64_mte, cpu)) {
@@ -2394,8 +2377,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* This matches Cortex-A710 BROADCASTMTE input being LOW.
*/
if (tcg_enabled() && cpu->tag_memory == NULL) {
- cpu->isar.id_aa64pfr1 =
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 1);
}
/*
@@ -2403,7 +2385,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* enabled on the guest (i.e mte=off), clear guest's MTE bits."
*/
if (kvm_enabled() && !cpu->kvm_mte) {
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 0);
}
#endif
}
@@ -2423,32 +2405,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
* try to access the non-existent system registers for them.
*/
/* FEAT_SPE (Statistical Profiling Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMSVER, 0);
/* FEAT_TRBE (Trace Buffer Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEBUFFER, 0);
/* FEAT_TRF (Self-hosted Trace Extension) */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEFILT, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, TRACEFILT, 0);
/* Trace Macrocell system register access */
- cpu->isar.id_aa64dfr0 =
- FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEVER, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, COPTRC, 0);
/* Memory mapped trace */
- cpu->isar.id_dfr0 =
- FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
+ FIELD_DP32_IDREG(isar, ID_DFR0, MMAPTRC, 0);
/* FEAT_AMU (Activity Monitors Extension) */
- cpu->isar.id_aa64pfr0 =
- FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
- cpu->isar.id_pfr0 =
- FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, AMU, 0);
+ FIELD_DP32_IDREG(isar, ID_PFR0, AMU, 0);
/* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
- cpu->isar.id_aa64pfr0 =
- FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
+ FIELD_DP64_IDREG(isar, ID_AA64PFR0, MPAM, 0);
}
/* MPU can be configured out of a PMSA CPU either by setting has-mpu
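
The hunks above replace the open-coded read/modify/write sequences on cpu->isar.<reg> with the new ID-register accessors (GET_IDREG, SET_IDREG, FIELD_DP32_IDREG, FIELD_DP64_IDREG). As a rough sketch of what those accessors do — these are not the actual QEMU definitions, and the idregs[] array and the *_EL1_IDX index names are assumptions made here for illustration — they presumably expand along these lines:

/* Hypothetical sketch only; the real definitions live elsewhere in target/arm/. */
#define GET_IDREG(isar, ID)         ((isar)->idregs[ID ## _EL1_IDX])
#define SET_IDREG(isar, ID, value)  ((isar)->idregs[ID ## _EL1_IDX] = (value))
/* Read-modify-write of one field in a single statement: */
#define FIELD_DP32_IDREG(isar, ID, field, value) \
    SET_IDREG(isar, ID, FIELD_DP32(GET_IDREG(isar, ID), ID, field, value))
#define FIELD_DP64_IDREG(isar, ID, field, value) \
    SET_IDREG(isar, ID, FIELD_DP64(GET_IDREG(isar, ID), ID, field, value))

With that reading, FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL3, 0) is a one-line replacement for the old fetch/FIELD_DP64/store-back pattern, which is why single-field updates lose their temporaries while multi-field updates keep a local t or u between GET_IDREG and SET_IDREG.
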
@@ -2672,7 +2644,7 @@ static const char *arm_gdb_get_core_xml_file(CPUState *cs)
* linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
*
* There should be a better place to put this, but we need this in
- * include/exec/cpu_ldst.h, and not some place linux-user specific.
+ * include/accel/tcg/cpu-ldst.h, and not some place linux-user specific.
*
* Note that arm-*-user will never set tagged_addr_enable.
*/
@@ -2703,6 +2675,29 @@ static const struct SysemuCPUOps arm_sysemu_ops = {
#endif
#ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx,
+ vaddr result, vaddr base)
+{
+ /*
+ * The Stage2 and Phys indexes are only used for ptw on arm32,
+ * and all pte's are aligned, so we never produce a wrap for these.
+ * Double check that we're not truncating a 40-bit physical address.
+ */
+ assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK));
+
+ if (!is_a64(cpu_env(cs))) {
+ return (uint32_t)result;
+ }
+
+ /*
+     * TODO: For FEAT_CPA2, decide how we want to resolve
+ * Unpredictable_CPACHECK in AddressIncrement.
+ */
+ return result;
+}
+#endif /* !CONFIG_USER_ONLY */
+
static const TCGCPUOps arm_tcg_ops = {
.mttcg_supported = true,
/* ARM processors have a weak memory model */
@@ -2722,6 +2717,7 @@ static const TCGCPUOps arm_tcg_ops = {
.untagged_addr = aarch64_untagged_addr,
#else
.tlb_fill_align = arm_cpu_tlb_fill_align,
+ .pointer_wrap = aprofile_pointer_wrap,
.cpu_exec_interrupt = arm_cpu_exec_interrupt,
.cpu_exec_halt = arm_cpu_exec_halt,
.cpu_exec_reset = cpu_reset,
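
The new .pointer_wrap hook (aprofile_pointer_wrap above) lets the target wrap the result of guest address arithmetic before it is used, e.g. truncating to 32 bits when the CPU is in AArch32 state. A hypothetical caller — not QEMU's actual call site; the function and parameter names here are assumptions — would apply it roughly like this:

/* Hypothetical sketch of how a pointer_wrap hook could be consumed. */
static vaddr compute_guest_addr(CPUState *cs, const TCGCPUOps *ops,
                                int mmu_idx, vaddr base, vaddr offset)
{
    vaddr result = base + offset;              /* raw address arithmetic */
    if (ops->pointer_wrap) {
        /* Give the target a chance to wrap/truncate the computed address. */
        result = ops->pointer_wrap(cs, mmu_idx, result, base);
    }
    return result;
}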