Diffstat (limited to 'target/arm/cpu.c')
-rw-r--r-- | target/arm/cpu.c | 304
1 file changed, 171 insertions, 133 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 01786ac..a59a5b5 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -23,6 +23,7 @@
 #include "qemu/timer.h"
 #include "qemu/log.h"
 #include "exec/page-vary.h"
+#include "exec/tswap.h"
 #include "target/arm/idau.h"
 #include "qemu/module.h"
 #include "qapi/error.h"
@@ -33,7 +34,7 @@
 #endif /* CONFIG_TCG */
 #include "internals.h"
 #include "cpu-features.h"
-#include "exec/exec-all.h"
+#include "exec/target_page.h"
 #include "hw/qdev-properties.h"
 #if !defined(CONFIG_USER_ONLY)
 #include "hw/loader.h"
@@ -121,6 +122,12 @@ void arm_restore_state_to_opc(CPUState *cs,
         env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
     }
 }
+
+int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
+{
+    return arm_env_mmu_index(cpu_env(cs));
+}
+
 #endif /* CONFIG_TCG */

 #ifndef CONFIG_USER_ONLY
@@ -144,11 +151,6 @@ static bool arm_cpu_has_work(CPUState *cs)
 }
 #endif /* !CONFIG_USER_ONLY */

-static int arm_cpu_mmu_index(CPUState *cs, bool ifetch)
-{
-    return arm_env_mmu_index(cpu_env(cs));
-}
-
 void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                      void *opaque)
 {
@@ -1097,37 +1099,6 @@ static void arm_cpu_set_irq(void *opaque, int irq, int level)
     }
 }

-static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
-{
-#ifdef CONFIG_KVM
-    ARMCPU *cpu = opaque;
-    CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
-    uint32_t linestate_bit;
-    int irq_id;
-
-    switch (irq) {
-    case ARM_CPU_IRQ:
-        irq_id = KVM_ARM_IRQ_CPU_IRQ;
-        linestate_bit = CPU_INTERRUPT_HARD;
-        break;
-    case ARM_CPU_FIQ:
-        irq_id = KVM_ARM_IRQ_CPU_FIQ;
-        linestate_bit = CPU_INTERRUPT_FIQ;
-        break;
-    default:
-        g_assert_not_reached();
-    }
-
-    if (level) {
-        env->irq_line_state |= linestate_bit;
-    } else {
-        env->irq_line_state &= ~linestate_bit;
-    }
-    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
-#endif
-}
-
 static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
@@ -1201,7 +1172,7 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)

     info->endian = BFD_ENDIAN_LITTLE;
     if (bswap_code(sctlr_b)) {
-        info->endian = TARGET_BIG_ENDIAN ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
+        info->endian = target_big_endian() ? BFD_ENDIAN_LITTLE : BFD_ENDIAN_BIG;
     }
     info->flags &= ~INSN_ARM_BE32;
 #ifndef CONFIG_USER_ONLY
@@ -1211,8 +1182,6 @@ static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
 #endif
 }

-#ifdef TARGET_AARCH64
-
 static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 {
     ARMCPU *cpu = ARM_CPU(cs);
@@ -1370,15 +1339,6 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     }
 }

-#else
-
-static inline void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
-{
-    g_assert_not_reached();
-}
-
-#endif
-
 static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags)
 {
     ARMCPU *cpu = ARM_CPU(cs);
@@ -1540,6 +1500,7 @@ static void arm_cpu_initfn(Object *obj)
  * 0 means "unset, use the default value". That default might vary depending
  * on the CPU type, and is set in the realize fn.
  */
+#ifndef CONFIG_USER_ONLY
 static const Property arm_cpu_gt_cntfrq_property =
             DEFINE_PROP_UINT64("cntfrq", ARMCPU, gt_cntfrq_hz, 0);

@@ -1549,7 +1510,6 @@ static const Property arm_cpu_reset_cbar_property =
 static const Property arm_cpu_reset_hivecs_property =
             DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

-#ifndef CONFIG_USER_ONLY
 static const Property arm_cpu_has_el2_property =
             DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

@@ -1572,6 +1532,7 @@ static const Property arm_cpu_has_neon_property =
 static const Property arm_cpu_has_dsp_property =
             DEFINE_PROP_BOOL("dsp", ARMCPU, has_dsp, true);

+#ifndef CONFIG_USER_ONLY
 static const Property arm_cpu_has_mpu_property =
             DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

@@ -1584,6 +1545,7 @@ static const Property arm_cpu_pmsav7_dregion_property =
             DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                            pmsav7_dregion,
                                            qdev_prop_uint32, uint32_t);
+#endif

 static bool arm_get_pmu(Object *obj, Error **errp)
 {
@@ -1608,6 +1570,35 @@ static void arm_set_pmu(Object *obj, bool value, Error **errp)
     cpu->has_pmu = value;
 }

+static bool aarch64_cpu_get_aarch64(Object *obj, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    return arm_feature(&cpu->env, ARM_FEATURE_AARCH64);
+}
+
+static void aarch64_cpu_set_aarch64(Object *obj, bool value, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    /*
+     * At this time, this property is only allowed if KVM is enabled. This
+     * restriction allows us to avoid fixing up functionality that assumes a
+     * uniform execution state like do_interrupt.
+     */
+    if (value == false) {
+        if (!kvm_enabled() || !kvm_arm_aarch32_supported()) {
+            error_setg(errp, "'aarch64' feature cannot be disabled "
+                             "unless KVM is enabled and 32-bit EL1 "
+                             "is supported");
+            return;
+        }
+        unset_feature(&cpu->env, ARM_FEATURE_AARCH64);
+    } else {
+        set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+    }
+}
+
 unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
 {
     /*
@@ -1724,7 +1715,7 @@ static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
     }
 }

-void arm_cpu_post_init(Object *obj)
+static void arm_cpu_post_init(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);

@@ -1735,6 +1726,14 @@ void arm_cpu_post_init(Object *obj)
      */
     arm_cpu_propagate_feature_implications(cpu);

+    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+        object_property_add_bool(obj, "aarch64", aarch64_cpu_get_aarch64,
+                                 aarch64_cpu_set_aarch64);
+        object_property_set_description(obj, "aarch64",
+                                        "Set on/off to enable/disable aarch64 "
+                                        "execution state ");
+    }
+#ifndef CONFIG_USER_ONLY
     if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
         arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
         qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property);
@@ -1750,7 +1749,6 @@ void arm_cpu_post_init(Object *obj)
                                        OBJ_PROP_FLAG_READWRITE);
     }

-#ifndef CONFIG_USER_ONLY
     if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
         /* Add the has_el3 state CPU property only if EL3 is allowed. This will
          * prevent "has_el3" from existing on CPUs which cannot support EL3.
@@ -1822,6 +1820,7 @@ void arm_cpu_post_init(Object *obj)
         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_dsp_property);
     }

+#ifndef CONFIG_USER_ONLY
     if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property);
         if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
@@ -1858,8 +1857,6 @@ void arm_cpu_post_init(Object *obj)
                                    &cpu->psci_conduit,
                                    OBJ_PROP_FLAG_READWRITE);

-    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
-
     if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
         qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
     }
@@ -1868,7 +1865,6 @@ void arm_cpu_post_init(Object *obj)
         kvm_arm_add_vcpu_properties(cpu);
     }

-#ifndef CONFIG_USER_ONLY
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) &&
         cpu_isar_feature(aa64_mte, cpu)) {
         object_property_add_link(obj, "tag-memory",
@@ -1886,6 +1882,7 @@ void arm_cpu_post_init(Object *obj)
         }
     }
 #endif
+    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);
 }

 static void arm_cpu_finalizefn(Object *obj)
@@ -1917,7 +1914,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
 {
     Error *local_err = NULL;

-#ifdef TARGET_AARCH64
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
         arm_cpu_sve_finalize(cpu, &local_err);
         if (local_err != NULL) {
@@ -1953,7 +1949,6 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
             return;
         }
     }
-#endif

     if (kvm_enabled()) {
         kvm_arm_steal_time_finalize(cpu, &local_err);
@@ -1968,6 +1963,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
 {
     CPUState *cs = CPU(dev);
     ARMCPU *cpu = ARM_CPU(dev);
+    ARMISARegisters *isar = &cpu->isar;
     ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
     CPUARMState *env = &cpu->env;
     Error *local_err = NULL;
@@ -2125,21 +2121,16 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
     }

     if (!cpu->has_vfp) {
-        uint64_t t;
         uint32_t u;

-        t = cpu->isar.id_aa64isar1;
-        t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
-        cpu->isar.id_aa64isar1 = t;
+        FIELD_DP64_IDREG(isar, ID_AA64ISAR1, JSCVT, 0);

-        t = cpu->isar.id_aa64pfr0;
-        t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
-        cpu->isar.id_aa64pfr0 = t;
+        FIELD_DP64_IDREG(isar, ID_AA64PFR0, FP, 0xf);

-        u = cpu->isar.id_isar6;
+        u = GET_IDREG(isar, ID_ISAR6);
         u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
         u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
-        cpu->isar.id_isar6 = u;
+        SET_IDREG(isar, ID_ISAR6, u);

         u = cpu->isar.mvfr0;
         u = FIELD_DP32(u, MVFR0, FPSP, 0);
@@ -2173,7 +2164,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)

         unset_feature(env, ARM_FEATURE_NEON);

-        t = cpu->isar.id_aa64isar0;
+        t = GET_IDREG(isar, ID_AA64ISAR0);
         t = FIELD_DP64(t, ID_AA64ISAR0, AES, 0);
         t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 0);
         t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 0);
@@ -2181,32 +2172,30 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 0);
         t = FIELD_DP64(t, ID_AA64ISAR0, SM4, 0);
         t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
-        cpu->isar.id_aa64isar0 = t;
+        SET_IDREG(isar, ID_AA64ISAR0, t);

-        t = cpu->isar.id_aa64isar1;
+        t = GET_IDREG(isar, ID_AA64ISAR1);
         t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
         t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
         t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
-        cpu->isar.id_aa64isar1 = t;
+        SET_IDREG(isar, ID_AA64ISAR1, t);

-        t = cpu->isar.id_aa64pfr0;
-        t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
-        cpu->isar.id_aa64pfr0 = t;
+        FIELD_DP64_IDREG(isar, ID_AA64PFR0, ADVSIMD, 0xf);

-        u = cpu->isar.id_isar5;
+        u = GET_IDREG(isar, ID_ISAR5);
         u = FIELD_DP32(u, ID_ISAR5, AES, 0);
         u = FIELD_DP32(u, ID_ISAR5, SHA1, 0);
         u = FIELD_DP32(u, ID_ISAR5, SHA2, 0);
         u = FIELD_DP32(u, ID_ISAR5, RDM, 0);
         u = FIELD_DP32(u, ID_ISAR5, VCMA, 0);
-        cpu->isar.id_isar5 = u;
+        SET_IDREG(isar, ID_ISAR5, u);

-        u = cpu->isar.id_isar6;
+        u = GET_IDREG(isar, ID_ISAR6);
         u = FIELD_DP32(u, ID_ISAR6, DP, 0);
         u = FIELD_DP32(u, ID_ISAR6, FHM, 0);
         u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
         u = FIELD_DP32(u, ID_ISAR6, I8MM, 0);
-        cpu->isar.id_isar6 = u;
+        SET_IDREG(isar, ID_ISAR6, u);

         if (!arm_feature(env, ARM_FEATURE_M)) {
             u = cpu->isar.mvfr1;
@@ -2223,16 +2212,11 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
     }

     if (!cpu->has_neon && !cpu->has_vfp) {
-        uint64_t t;
         uint32_t u;

-        t = cpu->isar.id_aa64isar0;
-        t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0);
-        cpu->isar.id_aa64isar0 = t;
+        FIELD_DP64_IDREG(isar, ID_AA64ISAR0, FHM, 0);

-        t = cpu->isar.id_aa64isar1;
-        t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0);
-        cpu->isar.id_aa64isar1 = t;
+        FIELD_DP64_IDREG(isar, ID_AA64ISAR1, FRINTTS, 0);

         u = cpu->isar.mvfr0;
         u = FIELD_DP32(u, MVFR0, SIMDREG, 0);
@@ -2249,19 +2233,17 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)

         unset_feature(env, ARM_FEATURE_THUMB_DSP);

-        u = cpu->isar.id_isar1;
-        u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1);
-        cpu->isar.id_isar1 = u;
+        FIELD_DP32_IDREG(isar, ID_ISAR1, EXTEND, 1);

-        u = cpu->isar.id_isar2;
+        u = GET_IDREG(isar, ID_ISAR2);
         u = FIELD_DP32(u, ID_ISAR2, MULTU, 1);
         u = FIELD_DP32(u, ID_ISAR2, MULTS, 1);
-        cpu->isar.id_isar2 = u;
+        SET_IDREG(isar, ID_ISAR2, u);

-        u = cpu->isar.id_isar3;
+        u = GET_IDREG(isar, ID_ISAR3);
         u = FIELD_DP32(u, ID_ISAR3, SIMD, 1);
         u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0);
-        cpu->isar.id_isar3 = u;
+        SET_IDREG(isar, ID_ISAR3, u);
     }

@@ -2336,14 +2318,12 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
          * Disable the security extension feature bits in the processor
          * feature registers as well.
          */
-        cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1, ID_PFR1, SECURITY, 0);
-        cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPSDBG, 0);
-        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
-                                           ID_AA64PFR0, EL3, 0);
+        FIELD_DP32_IDREG(isar, ID_PFR1, SECURITY, 0);
+        FIELD_DP32_IDREG(isar, ID_DFR0, COPSDBG, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL3, 0);

         /* Disable the realm management extension, which requires EL3. */
-        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
-                                           ID_AA64PFR0, RME, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64PFR0, RME, 0);
     }

     if (!cpu->has_el2) {
@@ -2366,9 +2346,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
                                      cpu);
 #endif
     } else {
-        cpu->isar.id_aa64dfr0 =
-            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0);
-        cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMUVER, 0);
+        FIELD_DP32_IDREG(isar, ID_DFR0, PERFMON, 0);
         cpu->pmceid0 = 0;
         cpu->pmceid1 = 0;
     }
@@ -2378,10 +2357,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
          * Disable the hypervisor feature bits in the processor feature
          * registers if we don't have EL2.
          */
-        cpu->isar.id_aa64pfr0 = FIELD_DP64(cpu->isar.id_aa64pfr0,
-                                           ID_AA64PFR0, EL2, 0);
-        cpu->isar.id_pfr1 = FIELD_DP32(cpu->isar.id_pfr1,
-                                       ID_PFR1, VIRTUALIZATION, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64PFR0, EL2, 0);
+        FIELD_DP32_IDREG(isar, ID_PFR1, VIRTUALIZATION, 0);
     }

     if (cpu_isar_feature(aa64_mte, cpu)) {
@@ -2400,8 +2377,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
          * This matches Cortex-A710 BROADCASTMTE input being LOW.
          */
         if (tcg_enabled() && cpu->tag_memory == NULL) {
-            cpu->isar.id_aa64pfr1 =
-                FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
+            FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 1);
         }

         /*
@@ -2409,7 +2385,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
          * enabled on the guest (i.e mte=off), clear guest's MTE bits."
          */
         if (kvm_enabled() && !cpu->kvm_mte) {
-            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+            FIELD_DP64_IDREG(isar, ID_AA64PFR1, MTE, 0);
         }
 #endif
     }
@@ -2429,32 +2405,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
          * try to access the non-existent system registers for them.
          */
         /* FEAT_SPE (Statistical Profiling Extension) */
-        cpu->isar.id_aa64dfr0 =
-            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64DFR0, PMSVER, 0);
         /* FEAT_TRBE (Trace Buffer Extension) */
-        cpu->isar.id_aa64dfr0 =
-            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEBUFFER, 0);
         /* FEAT_TRF (Self-hosted Trace Extension) */
-        cpu->isar.id_aa64dfr0 =
-            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
-        cpu->isar.id_dfr0 =
-            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, TRACEFILT, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEFILT, 0);
+        FIELD_DP32_IDREG(isar, ID_DFR0, TRACEFILT, 0);
         /* Trace Macrocell system register access */
-        cpu->isar.id_aa64dfr0 =
-            FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEVER, 0);
-        cpu->isar.id_dfr0 =
-            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, COPTRC, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64DFR0, TRACEVER, 0);
+        FIELD_DP32_IDREG(isar, ID_DFR0, COPTRC, 0);
         /* Memory mapped trace */
-        cpu->isar.id_dfr0 =
-            FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, MMAPTRC, 0);
+        FIELD_DP32_IDREG(isar, ID_DFR0, MMAPTRC, 0);
         /* FEAT_AMU (Activity Monitors Extension) */
-        cpu->isar.id_aa64pfr0 =
-            FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, AMU, 0);
-        cpu->isar.id_pfr0 =
-            FIELD_DP32(cpu->isar.id_pfr0, ID_PFR0, AMU, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64PFR0, AMU, 0);
+        FIELD_DP32_IDREG(isar, ID_PFR0, AMU, 0);
         /* FEAT_MPAM (Memory Partitioning and Monitoring Extension) */
-        cpu->isar.id_aa64pfr0 =
-            FIELD_DP64(cpu->isar.id_aa64pfr0, ID_AA64PFR0, MPAM, 0);
+        FIELD_DP64_IDREG(isar, ID_AA64PFR0, MPAM, 0);
     }

     /* MPU can be configured out of a PMSA CPU either by setting has-mpu
@@ -2647,13 +2613,54 @@ static const gchar *arm_gdb_arch_name(CPUState *cs)
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;

+    if (arm_gdbstub_is_aarch64(cpu)) {
+        return "aarch64";
+    }
     if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
         return "iwmmxt";
     }
     return "arm";
 }

-#ifndef CONFIG_USER_ONLY
+static const char *arm_gdb_get_core_xml_file(CPUState *cs)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    if (arm_gdbstub_is_aarch64(cpu)) {
+        return "aarch64-core.xml";
+    }
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        return "arm-m-profile.xml";
+    }
+    return "arm-core.xml";
+}
+
+#ifdef CONFIG_USER_ONLY
+/**
+ * aarch64_untagged_addr:
+ *
+ * Remove any address tag from @x. This is explicitly related to the
+ * linux syscall TIF_TAGGED_ADDR setting, not TBI in general.
+ *
+ * There should be a better place to put this, but we need this in
+ * include/exec/cpu_ldst.h, and not some place linux-user specific.
+ *
+ * Note that arm-*-user will never set tagged_addr_enable.
+ */
+static vaddr aarch64_untagged_addr(CPUState *cs, vaddr x)
+{
+    CPUARMState *env = cpu_env(cs);
+    if (env->tagged_addr_enable) {
+        /*
+         * TBI is enabled for userspace but not kernelspace addresses.
+         * Only clear the tag if bit 55 is clear.
+         */
+        x &= sextract64(x, 0, 56);
+    }
+    return x;
+}
+#else
 #include "hw/core/sysemu-cpu-ops.h"

 static const struct SysemuCPUOps arm_sysemu_ops = {
@@ -2668,20 +2675,52 @@
 #endif

 #ifdef CONFIG_TCG
+#ifndef CONFIG_USER_ONLY
+static vaddr aprofile_pointer_wrap(CPUState *cs, int mmu_idx,
+                                   vaddr result, vaddr base)
+{
+    /*
+     * The Stage2 and Phys indexes are only used for ptw on arm32,
+     * and all pte's are aligned, so we never produce a wrap for these.
+     * Double check that we're not truncating a 40-bit physical address.
+     */
+    assert((unsigned)mmu_idx < (ARMMMUIdx_Stage2_S & ARM_MMU_IDX_COREIDX_MASK));
+
+    if (!is_a64(cpu_env(cs))) {
+        return (uint32_t)result;
+    }
+
+    /*
+     * TODO: For FEAT_CPA2, decide how to we want to resolve
+     * Unpredictable_CPACHECK in AddressIncrement.
+     */
+    return result;
+}
+#endif /* !CONFIG_USER_ONLY */
+
 static const TCGCPUOps arm_tcg_ops = {
+    .mttcg_supported = true,
+    /* ARM processors have a weak memory model */
+    .guest_default_memory_order = 0,
+
     .initialize = arm_translate_init,
     .translate_code = arm_translate_code,
+    .get_tb_cpu_state = arm_get_tb_cpu_state,
     .synchronize_from_tb = arm_cpu_synchronize_from_tb,
     .debug_excp_handler = arm_debug_excp_handler,
     .restore_state_to_opc = arm_restore_state_to_opc,
+    .mmu_index = arm_cpu_mmu_index,

 #ifdef CONFIG_USER_ONLY
     .record_sigsegv = arm_cpu_record_sigsegv,
     .record_sigbus = arm_cpu_record_sigbus,
+    .untagged_addr = aarch64_untagged_addr,
 #else
     .tlb_fill_align = arm_cpu_tlb_fill_align,
+    .pointer_wrap = aprofile_pointer_wrap,
     .cpu_exec_interrupt = arm_cpu_exec_interrupt,
     .cpu_exec_halt = arm_cpu_exec_halt,
+    .cpu_exec_reset = cpu_reset,
     .do_interrupt = arm_cpu_do_interrupt,
     .do_transaction_failed = arm_cpu_do_transaction_failed,
     .do_unaligned_access = arm_cpu_do_unaligned_access,
@@ -2692,7 +2731,7 @@ static const TCGCPUOps arm_tcg_ops = {
 };
 #endif /* CONFIG_TCG */

-static void arm_cpu_class_init(ObjectClass *oc, void *data)
+static void arm_cpu_class_init(ObjectClass *oc, const void *data)
 {
     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
     CPUClass *cc = CPU_CLASS(acc);
@@ -2708,7 +2747,6 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
                                        &acc->parent_phases);

     cc->class_by_name = arm_cpu_class_by_name;
-    cc->mmu_index = arm_cpu_mmu_index;
     cc->dump_state = arm_cpu_dump_state;
     cc->set_pc = arm_cpu_set_pc;
     cc->get_pc = arm_cpu_get_pc;
@@ -2718,6 +2756,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
     cc->sysemu_ops = &arm_sysemu_ops;
 #endif
     cc->gdb_arch_name = arm_gdb_arch_name;
+    cc->gdb_get_core_xml_file = arm_gdb_get_core_xml_file;
     cc->gdb_stop_before_watchpoint = true;
     cc->disas_set_info = arm_disas_set_info;

@@ -2734,13 +2773,12 @@ static void arm_cpu_instance_init(Object *obj)
     arm_cpu_post_init(obj);
 }

-static void cpu_register_class_init(ObjectClass *oc, void *data)
+static void cpu_register_class_init(ObjectClass *oc, const void *data)
 {
     ARMCPUClass *acc = ARM_CPU_CLASS(oc);
     CPUClass *cc = CPU_CLASS(acc);

     acc->info = data;
-    cc->gdb_core_xml_file = "arm-core.xml";
     if (acc->info->deprecation_note) {
         cc->deprecation_note = acc->info->deprecation_note;
     }
@@ -2752,7 +2790,7 @@ void arm_cpu_register(const ARMCPUInfo *info)
         .parent = TYPE_ARM_CPU,
         .instance_init = arm_cpu_instance_init,
         .class_init = info->class_init ?: cpu_register_class_init,
-        .class_data = (void *)info,
+        .class_data = info,
     };

     type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
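Editor's note: the bulk of this diff converts direct accesses to the named cpu->isar.id_* fields into the GET_IDREG/SET_IDREG/FIELD_DP32_IDREG/FIELD_DP64_IDREG helpers, which operate on the ARMISARegisters pointer taken at the top of arm_cpu_realizefn(). The standalone C sketch below only illustrates that accessor pattern; the enum indices, the deposit_field() helper and the macro bodies here are invented for the example and are not QEMU's actual definitions, which live in the target/arm headers.

/* Illustrative sketch of an array-indexed ID-register accessor pattern.
 * All names below are placeholders, not QEMU's real macros or indices. */
#include <stdint.h>
#include <stdio.h>

/* Index of each ID register inside a flat array (illustrative subset). */
typedef enum {
    IDX_ID_ISAR6,
    IDX_ID_AA64PFR0,
    IDX_MAX
} IdRegIdx;

typedef struct {
    uint64_t idregs[IDX_MAX];
} ISARegs;

/* Minimal bitfield deposit, standing in for a FIELD_DP32/FIELD_DP64 helper. */
static inline uint64_t deposit_field(uint64_t reg, unsigned shift,
                                     unsigned len, uint64_t val)
{
    uint64_t mask = ((1ull << len) - 1) << shift;
    return (reg & ~mask) | ((val << shift) & mask);
}

/* Accessors over the array, mirroring the read-modify-write flow in the diff. */
#define GET_IDREG(isar, IDX)        ((isar)->idregs[IDX])
#define SET_IDREG(isar, IDX, v)     ((isar)->idregs[IDX] = (v))
#define FIELD_DP_IDREG(isar, IDX, shift, len, v) \
    SET_IDREG(isar, IDX, deposit_field(GET_IDREG(isar, IDX), shift, len, v))

int main(void)
{
    ISARegs isar = { .idregs = { [IDX_ID_AA64PFR0] = 0x0000000000000011ull } };

    /* Write 0xf into a 4-bit field at bits [19:16], mirroring how the diff
     * uses FIELD_DP64_IDREG(isar, ID_AA64PFR0, FP, 0xf) to mark FP absent. */
    FIELD_DP_IDREG(&isar, IDX_ID_AA64PFR0, 16, 4, 0xf);

    printf("ID_AA64PFR0 = 0x%016llx\n",
           (unsigned long long)GET_IDREG(&isar, IDX_ID_AA64PFR0));
    return 0;
}

The point of the pattern is that every read-modify-write of an ID register goes through a single array-indexed accessor, so the register file can be kept in one table; the real field names, indices and macro definitions should be taken from the QEMU tree itself rather than from this sketch.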