Diffstat (limited to 'target/arm/kvm.c')
-rw-r--r-- | target/arm/kvm.c | 168
1 file changed, 94 insertions, 74 deletions
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index a2791aa..6672344 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -26,6 +26,7 @@
 #include "system/kvm_int.h"
 #include "kvm_arm.h"
 #include "cpu.h"
+#include "cpu-sysregs.h"
 #include "trace.h"
 #include "internals.h"
 #include "hw/pci/pci.h"
@@ -218,6 +219,29 @@ static bool kvm_arm_pauth_supported(void)
             kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
 }
 
+
+static uint64_t idregs_sysreg_to_kvm_reg(ARMSysRegs sysreg)
+{
+    return ARM64_SYS_REG((sysreg & CP_REG_ARM64_SYSREG_OP0_MASK) >> CP_REG_ARM64_SYSREG_OP0_SHIFT,
+                         (sysreg & CP_REG_ARM64_SYSREG_OP1_MASK) >> CP_REG_ARM64_SYSREG_OP1_SHIFT,
+                         (sysreg & CP_REG_ARM64_SYSREG_CRN_MASK) >> CP_REG_ARM64_SYSREG_CRN_SHIFT,
+                         (sysreg & CP_REG_ARM64_SYSREG_CRM_MASK) >> CP_REG_ARM64_SYSREG_CRM_SHIFT,
+                         (sysreg & CP_REG_ARM64_SYSREG_OP2_MASK) >> CP_REG_ARM64_SYSREG_OP2_SHIFT);
+}
+
+/* read a sysreg value and store it in the idregs */
+static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf,
+                            ARMIDRegisterIdx index)
+{
+    uint64_t *reg;
+    int ret;
+
+    reg = &ahcf->isar.idregs[index];
+    ret = read_sys_reg64(fd, reg,
+                         idregs_sysreg_to_kvm_reg(id_register_sysreg[index]));
+    return ret;
+}
+
 static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
 {
     /* Identify the feature bits corresponding to the host CPU, and
@@ -227,6 +251,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
      */
     int fdarray[3];
    bool sve_supported;
+    bool el2_supported;
     bool pmu_supported = false;
     uint64_t features = 0;
     int err;
@@ -247,6 +272,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     }
 
     /*
+     * Ask for EL2 if supported.
+     */
+    el2_supported = kvm_arm_el2_supported();
+    if (el2_supported) {
+        init.features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+    }
+
+    /*
      * Ask for Pointer Authentication if supported, so that we get
      * the unsanitized field values for AA64ISAR1_EL1.
      */
@@ -266,10 +299,10 @@
     }
 
     ahcf->target = init.target;
-    ahcf->dtb_compatible = "arm,arm-v8";
+    ahcf->dtb_compatible = "arm,armv8";
+    int fd = fdarray[2];
 
-    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
-                         ARM64_SYS_REG(3, 0, 0, 4, 0));
+    err = get_host_cpu_reg(fd, ahcf, ID_AA64PFR0_EL1_IDX);
     if (unlikely(err < 0)) {
         /*
          * Before v4.15, the kernel only exposed a limited number of system
@@ -287,31 +320,20 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
          * ??? Either of these sounds like too much effort just
          * to work around running a modern host kernel.
          */
-        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
+        SET_IDREG(&ahcf->isar, ID_AA64PFR0, 0x00000011); /* EL1&0, AArch64 only */
         err = 0;
     } else {
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
-                              ARM64_SYS_REG(3, 0, 0, 4, 1));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
-                              ARM64_SYS_REG(3, 0, 0, 4, 5));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
-                              ARM64_SYS_REG(3, 0, 0, 5, 0));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
-                              ARM64_SYS_REG(3, 0, 0, 5, 1));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
-                              ARM64_SYS_REG(3, 0, 0, 6, 0));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
-                              ARM64_SYS_REG(3, 0, 0, 6, 1));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
-                              ARM64_SYS_REG(3, 0, 0, 6, 2));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
-                              ARM64_SYS_REG(3, 0, 0, 7, 0));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
-                              ARM64_SYS_REG(3, 0, 0, 7, 1));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
-                              ARM64_SYS_REG(3, 0, 0, 7, 2));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr3,
-                              ARM64_SYS_REG(3, 0, 0, 7, 3));
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64PFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64SMFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR3_EL1_IDX);
 
         /*
         * Note that if AArch32 support is not present in the host,
@@ -320,49 +342,31 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
         * than skipping the reads and leaving 0, as we must avoid
         * considering the values in every case.
         */
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
-                              ARM64_SYS_REG(3, 0, 0, 1, 0));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
-                              ARM64_SYS_REG(3, 0, 0, 1, 1));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
-                              ARM64_SYS_REG(3, 0, 0, 1, 2));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
-                              ARM64_SYS_REG(3, 0, 0, 1, 4));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
-                              ARM64_SYS_REG(3, 0, 0, 1, 5));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
-                              ARM64_SYS_REG(3, 0, 0, 1, 6));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
-                              ARM64_SYS_REG(3, 0, 0, 1, 7));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
-                              ARM64_SYS_REG(3, 0, 0, 2, 0));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
-                              ARM64_SYS_REG(3, 0, 0, 2, 1));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
-                              ARM64_SYS_REG(3, 0, 0, 2, 2));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
-                              ARM64_SYS_REG(3, 0, 0, 2, 3));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
-                              ARM64_SYS_REG(3, 0, 0, 2, 4));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
-                              ARM64_SYS_REG(3, 0, 0, 2, 5));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
-                              ARM64_SYS_REG(3, 0, 0, 2, 6));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
-                              ARM64_SYS_REG(3, 0, 0, 2, 7));
-
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
+        err |= get_host_cpu_reg(fd, ahcf, ID_PFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_PFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_DFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR3_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR3_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR4_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR5_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR6_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR4_EL1_IDX);
+
+        err |= read_sys_reg32(fd, &ahcf->isar.mvfr0,
                               ARM64_SYS_REG(3, 0, 0, 3, 0));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
+        err |= read_sys_reg32(fd, &ahcf->isar.mvfr1,
                               ARM64_SYS_REG(3, 0, 0, 3, 1));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
+        err |= read_sys_reg32(fd, &ahcf->isar.mvfr2,
                               ARM64_SYS_REG(3, 0, 0, 3, 2));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
-                              ARM64_SYS_REG(3, 0, 0, 3, 4));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
-                              ARM64_SYS_REG(3, 0, 0, 3, 5));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
-                              ARM64_SYS_REG(3, 0, 0, 3, 6));
+        err |= get_host_cpu_reg(fd, ahcf, ID_PFR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_DFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR5_EL1_IDX);
 
         /*
         * DBGDIDR is a bit complicated because the kernel doesn't
@@ -374,14 +378,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
         * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
         * We only do this if the CPU supports AArch32 at EL1.
         */
-        if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
-            int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
-            int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
+        if (FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL1) >= 2) {
+            int wrps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, WRPS);
+            int brps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, BRPS);
             int ctx_cmps =
-                FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
+                FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, CTX_CMPS);
             int version = 6; /* ARMv8 debug architecture */
             bool has_el3 =
-                !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
+                !!FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL3);
             uint32_t dbgdidr = 0;
 
             dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
@@ -396,7 +400,7 @@
 
     if (pmu_supported) {
         /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
+        err |= read_sys_reg64(fd, &ahcf->isar.reset_pmcr_el0,
                               ARM64_SYS_REG(3, 3, 9, 12, 0));
     }
 
@@ -408,8 +412,7 @@
         * enabled SVE support, which resulted in an error rather than RAZ.
         * So only read the register if we set KVM_ARM_VCPU_SVE above.
         */
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
-                              ARM64_SYS_REG(3, 0, 0, 4, 4));
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64ZFR0_EL1_IDX);
     }
 }
 
@@ -429,6 +432,10 @@
     features |= 1ULL << ARM_FEATURE_AARCH64;
     features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
 
+    if (el2_supported) {
+        features |= 1ULL << ARM_FEATURE_EL2;
+    }
+
     ahcf->features = features;
 
     return true;
@@ -1769,6 +1776,11 @@ bool kvm_arm_aarch32_supported(void)
     return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
 }
 
+bool kvm_arm_el2_supported(void)
+{
+    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL2);
+}
+
 bool kvm_arm_sve_supported(void)
 {
     return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
@@ -1846,6 +1858,11 @@ static int kvm_arm_sve_set_vls(ARMCPU *cpu)
 
 #define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
 
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+    return 0;
+}
+
 int kvm_arch_init_vcpu(CPUState *cs)
 {
     int ret;
@@ -1884,6 +1901,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
         cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                                       1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
     }
+    if (cpu->has_el2 && kvm_arm_el2_supported()) {
+        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+    }
 
     /* Do KVM_ARM_VCPU_INIT ioctl */
     ret = kvm_arm_vcpu_init(cpu);
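Note on the new helper: idregs_sysreg_to_kvm_reg() only repacks the op0/op1/CRn/CRm/op2 fields of QEMU's internal sysreg encoding into the index format that ARM64_SYS_REG() and KVM_GET_ONE_REG expect. The standalone sketch below illustrates that repacking pattern; the SYSREG_*_SHIFT values and helper names are illustrative assumptions (modelled on the Linux KVM_REG_ARM64_SYSREG_* layout), not values copied from QEMU's cpu-sysregs.h.

/*
 * Standalone sketch of the sysreg -> field repacking done by
 * idregs_sysreg_to_kvm_reg() in the patch above.  The shift values are
 * assumptions mirroring the Linux KVM uapi layout, not QEMU's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define SYSREG_OP0_SHIFT 14
#define SYSREG_OP1_SHIFT 11
#define SYSREG_CRN_SHIFT  7
#define SYSREG_CRM_SHIFT  3
#define SYSREG_OP2_SHIFT  0

/* Pack op0/op1/CRn/CRm/op2 into a single encoding. */
static uint32_t encode_sysreg(int op0, int op1, int crn, int crm, int op2)
{
    return (op0 << SYSREG_OP0_SHIFT) | (op1 << SYSREG_OP1_SHIFT) |
           (crn << SYSREG_CRN_SHIFT) | (crm << SYSREG_CRM_SHIFT) |
           (op2 << SYSREG_OP2_SHIFT);
}

/* Unpack one field again, as the helper does before calling ARM64_SYS_REG(). */
static int extract_field(uint32_t sysreg, int shift, uint32_t width_mask)
{
    return (sysreg >> shift) & width_mask;
}

int main(void)
{
    /* ID_AA64PFR0_EL1 is op0=3, op1=0, CRn=0, CRm=4, op2=0. */
    uint32_t enc = encode_sysreg(3, 0, 0, 4, 0);

    printf("op0=%d op1=%d crn=%d crm=%d op2=%d\n",
           extract_field(enc, SYSREG_OP0_SHIFT, 0x3),
           extract_field(enc, SYSREG_OP1_SHIFT, 0x7),
           extract_field(enc, SYSREG_CRN_SHIFT, 0xf),
           extract_field(enc, SYSREG_CRM_SHIFT, 0xf),
           extract_field(enc, SYSREG_OP2_SHIFT, 0x7));
    return 0;
}

Built with any C compiler, this prints the tuple (3, 0, 0, 4, 0) for ID_AA64PFR0_EL1, i.e. the same operands the removed hand-written ARM64_SYS_REG(3, 0, 0, 4, 0) calls used to spell out per register.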