Diffstat (limited to 'target/arm/kvm.c')
-rw-r--r-- | target/arm/kvm.c | 375
1 file changed, 237 insertions, 138 deletions
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index da30bdb..0d57081 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -26,11 +26,12 @@
 #include "system/kvm_int.h"
 #include "kvm_arm.h"
 #include "cpu.h"
+#include "cpu-sysregs.h"
 #include "trace.h"
 #include "internals.h"
 #include "hw/pci/pci.h"
 #include "exec/memattrs.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
 #include "gdbstub/enums.h"
 #include "hw/boards.h"
 #include "hw/irq.h"
@@ -100,8 +101,7 @@ static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature)
     return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature);
 }
 
-bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
-                                      int *fdarray,
+bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
                                       struct kvm_vcpu_init *init)
 {
     int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
@@ -150,40 +150,13 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
         struct kvm_vcpu_init preferred;
 
         ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
-        if (!ret) {
-            init->target = preferred.target;
-        }
-    }
-    if (ret >= 0) {
-        ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
         if (ret < 0) {
             goto err;
         }
-    } else if (cpus_to_try) {
-        /* Old kernel which doesn't know about the
-         * PREFERRED_TARGET ioctl: we know it will only support
-         * creating one kind of guest CPU which is its preferred
-         * CPU type.
-         */
-        struct kvm_vcpu_init try;
-
-        while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
-            try.target = *cpus_to_try++;
-            memcpy(try.features, init->features, sizeof(init->features));
-            ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
-            if (ret >= 0) {
-                break;
-            }
-        }
-        if (ret < 0) {
-            goto err;
-        }
-        init->target = try.target;
-    } else {
-        /* Treat a NULL cpus_to_try argument the same as an empty
-         * list, which means we will fail the call since this must
-         * be an old kernel which doesn't support PREFERRED_TARGET.
-         */
+        init->target = preferred.target;
+    }
+    ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
+    if (ret < 0) {
         goto err;
     }
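Note: with the cpus_to_try fallback gone, the scratch-vCPU helper has a single calling convention: the caller seeds init->target (-1 meaning "use KVM_ARM_PREFERRED_TARGET") and the helper performs KVM_ARM_VCPU_INIT itself. A minimal sketch of a caller, assuming only the QEMU helpers visible in this diff (kvm_arm_destroy_scratch_host_vcpu() is the existing cleanup counterpart); probe_host_regs_example() is a hypothetical name:

static bool probe_host_regs_example(void)
{
    int fdarray[3];                               /* kvmfd, vmfd, cpufd */
    struct kvm_vcpu_init init = { .target = -1 }; /* -1: preferred target */

    if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
        return false;  /* no usable KVM, or KVM_ARM_VCPU_INIT failed */
    }
    /* ... query host ID registers through the vcpu fd, fdarray[2] ... */
    kvm_arm_destroy_scratch_host_vcpu(fdarray);
    return true;
}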
@@ -246,6 +219,29 @@ static bool kvm_arm_pauth_supported(void)
             kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
 }
 
+
+static uint64_t idregs_sysreg_to_kvm_reg(ARMSysRegs sysreg)
+{
+    return ARM64_SYS_REG((sysreg & CP_REG_ARM64_SYSREG_OP0_MASK) >> CP_REG_ARM64_SYSREG_OP0_SHIFT,
+                         (sysreg & CP_REG_ARM64_SYSREG_OP1_MASK) >> CP_REG_ARM64_SYSREG_OP1_SHIFT,
+                         (sysreg & CP_REG_ARM64_SYSREG_CRN_MASK) >> CP_REG_ARM64_SYSREG_CRN_SHIFT,
+                         (sysreg & CP_REG_ARM64_SYSREG_CRM_MASK) >> CP_REG_ARM64_SYSREG_CRM_SHIFT,
+                         (sysreg & CP_REG_ARM64_SYSREG_OP2_MASK) >> CP_REG_ARM64_SYSREG_OP2_SHIFT);
+}
+
+/* read a sysreg value and store it in the idregs */
+static int get_host_cpu_reg(int fd, ARMHostCPUFeatures *ahcf,
+                            ARMIDRegisterIdx index)
+{
+    uint64_t *reg;
+    int ret;
+
+    reg = &ahcf->isar.idregs[index];
+    ret = read_sys_reg64(fd, reg,
+                         idregs_sysreg_to_kvm_reg(id_register_sysreg[index]));
+    return ret;
+}
+
 static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
 {
     /* Identify the feature bits corresponding to the host CPU, and
@@ -255,21 +251,11 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
      */
     int fdarray[3];
     bool sve_supported;
+    bool el2_supported;
     bool pmu_supported = false;
     uint64_t features = 0;
     int err;
 
-    /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
-     * we know these will only support creating one kind of guest CPU,
-     * which is its preferred CPU type. Fortunately these old kernels
-     * support only a very limited number of CPUs.
-     */
-    static const uint32_t cpus_to_try[] = {
-        KVM_ARM_TARGET_AEM_V8,
-        KVM_ARM_TARGET_FOUNDATION_V8,
-        KVM_ARM_TARGET_CORTEX_A57,
-        QEMU_KVM_ARM_TARGET_NONE
-    };
     /*
      * target = -1 informs kvm_arm_create_scratch_host_vcpu()
     * to use the preferred target
@@ -286,6 +272,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     }
 
     /*
+     * Ask for EL2 if supported.
+     */
+    el2_supported = kvm_arm_el2_supported();
+    if (el2_supported) {
+        init.features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+    }
+
+    /*
      * Ask for Pointer Authentication if supported, so that we get
      * the unsanitized field values for AA64ISAR1_EL1.
      */
@@ -300,15 +294,15 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
         features |= 1ULL << ARM_FEATURE_PMU;
     }
 
-    if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+    if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
         return false;
     }
 
     ahcf->target = init.target;
-    ahcf->dtb_compatible = "arm,arm-v8";
+    ahcf->dtb_compatible = "arm,armv8";
+    int fd = fdarray[2];
 
-    err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
-                         ARM64_SYS_REG(3, 0, 0, 4, 0));
+    err = get_host_cpu_reg(fd, ahcf, ID_AA64PFR0_EL1_IDX);
     if (unlikely(err < 0)) {
         /*
          * Before v4.15, the kernel only exposed a limited number of system
@@ -326,31 +320,21 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
          * ??? Either of these sounds like too much effort just
          * to work around running a modern host kernel.
          */
-        ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
+        SET_IDREG(&ahcf->isar, ID_AA64PFR0, 0x00000011); /* EL1&0, AArch64 only */
        err = 0;
    } else {
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
-                              ARM64_SYS_REG(3, 0, 0, 4, 1));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
-                              ARM64_SYS_REG(3, 0, 0, 4, 5));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
-                              ARM64_SYS_REG(3, 0, 0, 5, 0));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
-                              ARM64_SYS_REG(3, 0, 0, 5, 1));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
-                              ARM64_SYS_REG(3, 0, 0, 6, 0));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
-                              ARM64_SYS_REG(3, 0, 0, 6, 1));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
-                              ARM64_SYS_REG(3, 0, 0, 6, 2));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
-                              ARM64_SYS_REG(3, 0, 0, 7, 0));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
-                              ARM64_SYS_REG(3, 0, 0, 7, 1));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
-                              ARM64_SYS_REG(3, 0, 0, 7, 2));
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr3,
-                              ARM64_SYS_REG(3, 0, 0, 7, 3));
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64PFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64PFR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64SMFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64ISAR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64MMFR3_EL1_IDX);
 
         /*
          * Note that if AArch32 support is not present in the host,
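Note: idregs_sysreg_to_kvm_reg() above is just a repacking of the op0/op1/CRn/CRm/op2 fields from QEMU's CP_REG_ARM64_SYSREG_* layout into the kernel's ARM64_SYS_REG() encoding. A standalone sketch of the arithmetic, assuming the field positions from the kernel UAPI (asm/kvm.h: op0 at bit 14, op1 at 11, CRn at 7, CRm at 3, op2 at 0); sysreg_encode() is a hypothetical helper:

#include <assert.h>
#include <stdint.h>

/* Assumed UAPI field positions: op0@14, op1@11, crn@7, crm@3, op2@0 */
static uint64_t sysreg_encode(uint64_t op0, uint64_t op1, uint64_t crn,
                              uint64_t crm, uint64_t op2)
{
    return (op0 << 14) | (op1 << 11) | (crn << 7) | (crm << 3) | op2;
}

int main(void)
{
    /* ID_AA64PFR0_EL1 is op0=3 op1=0 crn=0 crm=4 op2=0 -- the same tuple
     * the old explicit ARM64_SYS_REG(3, 0, 0, 4, 0) call used. */
    uint64_t enc = sysreg_encode(3, 0, 0, 4, 0);

    assert(((enc >> 14) & 0x3) == 3);  /* op0 round-trips */
    assert(((enc >> 3) & 0xf) == 4);   /* crm round-trips */
    return 0;
}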
@@ -359,49 +343,31 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
          * than skipping the reads and leaving 0, as we must avoid
          * considering the values in every case.
          */
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
-                              ARM64_SYS_REG(3, 0, 0, 1, 0));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
-                              ARM64_SYS_REG(3, 0, 0, 1, 1));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
-                              ARM64_SYS_REG(3, 0, 0, 1, 2));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
-                              ARM64_SYS_REG(3, 0, 0, 1, 4));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
-                              ARM64_SYS_REG(3, 0, 0, 1, 5));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
-                              ARM64_SYS_REG(3, 0, 0, 1, 6));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
-                              ARM64_SYS_REG(3, 0, 0, 1, 7));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
-                              ARM64_SYS_REG(3, 0, 0, 2, 0));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
-                              ARM64_SYS_REG(3, 0, 0, 2, 1));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
-                              ARM64_SYS_REG(3, 0, 0, 2, 2));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
-                              ARM64_SYS_REG(3, 0, 0, 2, 3));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
-                              ARM64_SYS_REG(3, 0, 0, 2, 4));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
-                              ARM64_SYS_REG(3, 0, 0, 2, 5));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
-                              ARM64_SYS_REG(3, 0, 0, 2, 6));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
-                              ARM64_SYS_REG(3, 0, 0, 2, 7));
-
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
+        err |= get_host_cpu_reg(fd, ahcf, ID_PFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_PFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_DFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR3_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR0_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR3_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR4_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR5_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_ISAR6_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR4_EL1_IDX);
+
+        err |= read_sys_reg32(fd, &ahcf->isar.mvfr0,
                               ARM64_SYS_REG(3, 0, 0, 3, 0));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
+        err |= read_sys_reg32(fd, &ahcf->isar.mvfr1,
                               ARM64_SYS_REG(3, 0, 0, 3, 1));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
+        err |= read_sys_reg32(fd, &ahcf->isar.mvfr2,
                               ARM64_SYS_REG(3, 0, 0, 3, 2));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
-                              ARM64_SYS_REG(3, 0, 0, 3, 4));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
-                              ARM64_SYS_REG(3, 0, 0, 3, 5));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
-                              ARM64_SYS_REG(3, 0, 0, 3, 6));
+        err |= get_host_cpu_reg(fd, ahcf, ID_PFR2_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_DFR1_EL1_IDX);
+        err |= get_host_cpu_reg(fd, ahcf, ID_MMFR5_EL1_IDX);
 
         /*
          * DBGDIDR is a bit complicated because the kernel doesn't
@@ -413,14 +379,14 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
      * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
      * We only do this if the CPU supports AArch32 at EL1.
      */
-    if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
-        int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
-        int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
+    if (FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL1) >= 2) {
+        int wrps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, WRPS);
+        int brps = FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, BRPS);
         int ctx_cmps =
-            FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
+            FIELD_EX64_IDREG(&ahcf->isar, ID_AA64DFR0, CTX_CMPS);
         int version = 6; /* ARMv8 debug architecture */
         bool has_el3 =
-            !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
+            !!FIELD_EX32_IDREG(&ahcf->isar, ID_AA64PFR0, EL3);
         uint32_t dbgdidr = 0;
 
         dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
@@ -435,7 +401,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
 
     if (pmu_supported) {
         /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
+        err |= read_sys_reg64(fd, &ahcf->isar.reset_pmcr_el0,
                               ARM64_SYS_REG(3, 3, 9, 12, 0));
     }
 
@@ -447,8 +413,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
          * enabled SVE support, which resulted in an error rather than RAZ.
          * So only read the register if we set KVM_ARM_VCPU_SVE above.
          */
-        err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
-                              ARM64_SYS_REG(3, 0, 0, 4, 4));
+        err |= get_host_cpu_reg(fd, ahcf, ID_AA64ZFR0_EL1_IDX);
     }
 }
 
@@ -468,6 +433,10 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
     features |= 1ULL << ARM_FEATURE_AARCH64;
     features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
 
+    if (el2_supported) {
+        features |= 1ULL << ARM_FEATURE_EL2;
+    }
+
     ahcf->features = features;
 
     return true;
@@ -750,17 +719,6 @@ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
     memory_region_ref(kd->mr);
 }
 
-static int compare_u64(const void *a, const void *b)
-{
-    if (*(uint64_t *)a > *(uint64_t *)b) {
-        return 1;
-    }
-    if (*(uint64_t *)a < *(uint64_t *)b) {
-        return -1;
-    }
-    return 0;
-}
-
 /*
  * cpreg_values are sorted in ascending order by KVM register ID
  * (see kvm_arm_init_cpreg_list). This allows us to cheaply find
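Note: the removed compare_u64() was the comparator behind the sorted-list invariant described in the comment above; with the IDs kept in ascending order, lookups stay O(log n) via a plain bsearch(3). A self-contained sketch of the idea (generic C, not QEMU code):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

/* Same shape as the removed compare_u64(): explicit comparisons avoid
 * the truncation a "return (int)(x - y)" style comparator would risk. */
static int cmp_u64(const void *a, const void *b)
{
    uint64_t x = *(const uint64_t *)a, y = *(const uint64_t *)b;
    return (x > y) - (x < y);
}

int main(void)
{
    uint64_t ids[] = { 0x10, 0x20, 0x30 };  /* sorted KVM register IDs */
    uint64_t key = 0x20;
    uint64_t *hit = bsearch(&key, ids, 3, sizeof(ids[0]), cmp_u64);

    assert(hit && *hit == 0x20);
    return 0;
}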
@@ -932,6 +890,58 @@ bool write_kvmstate_to_list(ARMCPU *cpu)
     return ok;
 }
 
+/* pretty-print a KVM register */
+#define CP_REG_ARM64_SYSREG_OP(_reg, _op) \
+    ((uint8_t)((_reg & CP_REG_ARM64_SYSREG_ ## _op ## _MASK) >> \
+               CP_REG_ARM64_SYSREG_ ## _op ## _SHIFT))
+
+static gchar *kvm_print_sve_register_name(uint64_t regidx)
+{
+    uint16_t sve_reg = regidx & 0x000000000000ffff;
+
+    if (regidx == KVM_REG_ARM64_SVE_VLS) {
+        return g_strdup_printf("SVE VLS");
+    }
+    /* zreg, preg, ffr */
+    switch (sve_reg & 0xfc00) {
+    case 0:
+        return g_strdup_printf("SVE zreg n:%d slice:%d",
+                               (sve_reg & 0x03e0) >> 5, sve_reg & 0x001f);
+    case 0x0400:
+        return g_strdup_printf("SVE preg n:%d slice:%d",
+                               (sve_reg & 0x01e0) >> 5, sve_reg & 0x001f);
+    case 0x0600:
+        return g_strdup_printf("SVE ffr slice:%d", sve_reg & 0x001f);
+    default:
+        return g_strdup_printf("SVE ???");
+    }
+}
+
+static gchar *kvm_print_register_name(uint64_t regidx)
+{
+    switch ((regidx & KVM_REG_ARM_COPROC_MASK)) {
+    case KVM_REG_ARM_CORE:
+        return g_strdup_printf("core reg %"PRIx64, regidx);
+    case KVM_REG_ARM_DEMUX:
+        return g_strdup_printf("demuxed reg %"PRIx64, regidx);
+    case KVM_REG_ARM64_SYSREG:
+        return g_strdup_printf("op0:%d op1:%d crn:%d crm:%d op2:%d",
+                               CP_REG_ARM64_SYSREG_OP(regidx, OP0),
+                               CP_REG_ARM64_SYSREG_OP(regidx, OP1),
+                               CP_REG_ARM64_SYSREG_OP(regidx, CRN),
+                               CP_REG_ARM64_SYSREG_OP(regidx, CRM),
+                               CP_REG_ARM64_SYSREG_OP(regidx, OP2));
+    case KVM_REG_ARM_FW:
+        return g_strdup_printf("fw reg %d", (int)(regidx & 0xffff));
+    case KVM_REG_ARM64_SVE:
+        return kvm_print_sve_register_name(regidx);
+    case KVM_REG_ARM_FW_FEAT_BMAP:
+        return g_strdup_printf("fw feat reg %d", (int)(regidx & 0xffff));
+    default:
+        return g_strdup_printf("%"PRIx64, regidx);
+    }
+}
+
 bool write_list_to_kvmstate(ARMCPU *cpu, int level)
 {
     CPUState *cs = CPU(cpu);
@@ -959,11 +969,45 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level)
             g_assert_not_reached();
         }
         if (ret) {
+            gchar *reg_str = kvm_print_register_name(regidx);
+
             /* We might fail for "unknown register" and also for
              * "you tried to set a register which is constant with
              * a different value from what it actually contains".
              */
             ok = false;
+            switch (ret) {
+            case -ENOENT:
+                error_report("Could not set register %s: unknown to KVM",
+                             reg_str);
+                break;
+            case -EINVAL:
+                if ((regidx & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+                    if (!kvm_get_one_reg(cs, regidx, &v32)) {
+                        error_report("Could not set register %s to %x (is %x)",
+                                     reg_str, (uint32_t)cpu->cpreg_values[i],
+                                     v32);
+                    } else {
+                        error_report("Could not set register %s to %x",
+                                     reg_str, (uint32_t)cpu->cpreg_values[i]);
+                    }
+                } else /* U64 */ {
+                    uint64_t v64;
+
+                    if (!kvm_get_one_reg(cs, regidx, &v64)) {
+                        error_report("Could not set register %s to %"PRIx64" (is %"PRIx64")",
+                                     reg_str, cpu->cpreg_values[i], v64);
+                    } else {
+                        error_report("Could not set register %s to %"PRIx64,
+                                     reg_str, cpu->cpreg_values[i]);
+                    }
+                }
+                break;
+            default:
+                error_report("Could not set register %s: %s",
+                             reg_str, strerror(-ret));
+            }
+            g_free(reg_str);
         }
     }
     return ok;
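Note: with the pretty-printer in place, the -EINVAL path above reports a decoded name instead of a bare hexadecimal index; for a system register such as MPIDR_EL1 (op0=3 op1=0 crn=0 crm=0 op2=5) the message should read roughly "Could not set register op0:3 op1:0 crn:0 crm:0 op2:5 to <new> (is <cur>)" (derived from the format strings above, not captured output). The assumed usage pattern, since the returned string is caller-owned:

gchar *reg_str = kvm_print_register_name(regidx);  /* must be g_free()d */
error_report("Could not set register %s: %s", reg_str, strerror(-ret));
g_free(reg_str);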
@@ -977,13 +1021,24 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu)
     }
 }
 
-void kvm_arm_cpu_post_load(ARMCPU *cpu)
+bool kvm_arm_cpu_post_load(ARMCPU *cpu)
 {
+    if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+        return false;
+    }
+
+    /* Note that it's OK for the TCG side not to know about
+     * every register in the list; KVM is authoritative if
+     * we're using it.
+     */
+    write_list_to_cpustate(cpu);
+
     /* KVM virtual time adjustment */
     if (cpu->kvm_adjvtime) {
         cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
         cpu->kvm_vtime_dirty = true;
     }
+
+    return true;
 }
 
 void kvm_arm_reset_vcpu(ARMCPU *cpu)
@@ -1797,6 +1852,11 @@ bool kvm_arm_aarch32_supported(void)
     return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
 }
 
+bool kvm_arm_el2_supported(void)
+{
+    return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL2);
+}
+
 bool kvm_arm_sve_supported(void)
 {
     return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
@@ -1835,7 +1895,7 @@ uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
 
         probed = true;
 
-        if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
+        if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
             error_report("failed to create scratch VCPU with SVE enabled");
             abort();
         }
@@ -1874,6 +1934,11 @@ static int kvm_arm_sve_set_vls(ARMCPU *cpu)
 
 #define ARM_CPU_ID_MPIDR       3, 0, 0, 0, 5
 
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+    return 0;
+}
+
 int kvm_arch_init_vcpu(CPUState *cs)
 {
     int ret;
@@ -1882,8 +1947,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
     CPUARMState *env = &cpu->env;
     uint64_t psciver;
 
-    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
-        !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
+    if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
         error_report("KVM is not supported for this guest CPU type");
         return -EINVAL;
     }
@@ -1913,6 +1977,9 @@ int kvm_arch_init_vcpu(CPUState *cs)
         cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
                                       1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
     }
+    if (cpu->has_el2 && kvm_arm_el2_supported()) {
+        cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_HAS_EL2;
+    }
 
     /* Do KVM_ARM_VCPU_INIT ioctl */
     ret = kvm_arm_vcpu_init(cpu);
@@ -2056,7 +2123,7 @@ static int kvm_arch_put_sve(CPUState *cs)
     return 0;
 }
 
-int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
+int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp)
 {
     uint64_t val;
     uint32_t fpr;
@@ -2366,10 +2433,12 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
 {
     ram_addr_t ram_addr;
     hwaddr paddr;
+    AcpiGhesState *ags;
 
     assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
 
-    if (acpi_ghes_present() && addr) {
+    ags = acpi_ghes_get_state();
+    if (ags && addr) {
         ram_addr = qemu_ram_addr_from_host(addr);
         if (ram_addr != RAM_ADDR_INVALID &&
             kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
@@ -2387,7 +2456,8 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
              */
             if (code == BUS_MCEERR_AR) {
                 kvm_cpu_synchronize_state(c);
-                if (!acpi_ghes_memory_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
+                if (!acpi_ghes_memory_errors(ags, ACPI_HEST_SRC_ID_SYNC,
+                                             paddr)) {
                     kvm_inject_arm_sea(c);
                 } else {
                     error_report("failed to record the error");
@@ -2468,3 +2538,32 @@ void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
         cpu->kvm_mte = true;
     }
 }
+
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level)
+{
+    ARMCPU *cpu = arm_cpu;
+    CPUARMState *env = &cpu->env;
+    CPUState *cs = CPU(cpu);
+    uint32_t linestate_bit;
+    int irq_id;
+
+    switch (irq) {
+    case ARM_CPU_IRQ:
+        irq_id = KVM_ARM_IRQ_CPU_IRQ;
+        linestate_bit = CPU_INTERRUPT_HARD;
+        break;
+    case ARM_CPU_FIQ:
+        irq_id = KVM_ARM_IRQ_CPU_FIQ;
+        linestate_bit = CPU_INTERRUPT_FIQ;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    if (level) {
+        env->irq_line_state |= linestate_bit;
+    } else {
+        env->irq_line_state &= ~linestate_bit;
+    }
+    kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+}
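Note: arm_cpu_kvm_set_irq() mirrors the line state in env->irq_line_state and forwards the change through the existing kvm_arm_set_irq() helper, which packs its arguments into a single KVM_IRQ_LINE word. A sketch of that packing, assuming the shift names from the kernel UAPI (KVM_ARM_IRQ_TYPE_SHIFT, KVM_ARM_IRQ_VCPU_SHIFT in asm/kvm.h):

/* One 32-bit irq word, fields high to low: | type | vcpu index | irq id | */
uint32_t kvm_irq = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
                   (cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT) |
                   KVM_ARM_IRQ_CPU_IRQ;
/* ...which kvm_set_irq(kvm_state, kvm_irq, !!level) then asserts or clears. */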