Diffstat (limited to 'target/arm/kvm.c')
-rw-r--r--  target/arm/kvm.c  201
1 file changed, 126 insertions(+), 75 deletions(-)
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 70f79ed..74fda8b 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -20,17 +20,17 @@
#include "qemu/main-loop.h"
#include "qom/object.h"
#include "qapi/error.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/runstate.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_int.h"
+#include "system/system.h"
+#include "system/runstate.h"
+#include "system/kvm.h"
+#include "system/kvm_int.h"
#include "kvm_arm.h"
#include "cpu.h"
#include "trace.h"
#include "internals.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
-#include "exec/address-spaces.h"
+#include "system/address-spaces.h"
#include "gdbstub/enums.h"
#include "hw/boards.h"
#include "hw/irq.h"
@@ -39,8 +39,10 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "target/arm/gtimer.h"
+#include "migration/blocker.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+ KVM_CAP_INFO(DEVICE_CTRL),
KVM_CAP_LAST_INFO
};
@@ -98,8 +100,7 @@ static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature)
return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature);
}
-bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
- int *fdarray,
+bool kvm_arm_create_scratch_host_vcpu(int *fdarray,
struct kvm_vcpu_init *init)
{
int ret = 0, kvmfd = -1, vmfd = -1, cpufd = -1;
@@ -119,6 +120,21 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
if (vmfd < 0) {
goto err;
}
+
+ /*
+ * The MTE capability must be enabled by the VMM before creating
+ * any VCPUs in order to allow the MTE bits of the ID_AA64PFR1
+ * register to be probed correctly, as they are masked if MTE
+ * is not enabled.
+ */
+ if (kvm_arm_mte_supported()) {
+ KVMState kvm_state;
+
+ kvm_state.fd = kvmfd;
+ kvm_state.vmfd = vmfd;
+ kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0);
+ }
+
cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
if (cpufd < 0) {
goto err;
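
Note on ordering: KVM masks the MTE fields of ID_AA64PFR1_EL1 until KVM_CAP_ARM_MTE has been
enabled on the VM, and the kernel only accepts the enable while the VM has no VCPUs, so the
scratch path must slot it between KVM_CREATE_VM and KVM_CREATE_VCPU. A minimal sketch of that
sequence against the raw ioctl interface (error handling omitted; fd names illustrative):

    int vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    if (ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_MTE) > 0) {
        struct kvm_enable_cap cap = { .cap = KVM_CAP_ARM_MTE };
        ioctl(vmfd, KVM_ENABLE_CAP, &cap);   /* must precede any KVM_CREATE_VCPU */
    }
    int cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
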
@@ -133,40 +149,13 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
struct kvm_vcpu_init preferred;
ret = ioctl(vmfd, KVM_ARM_PREFERRED_TARGET, &preferred);
- if (!ret) {
- init->target = preferred.target;
- }
- }
- if (ret >= 0) {
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
if (ret < 0) {
goto err;
}
- } else if (cpus_to_try) {
- /* Old kernel which doesn't know about the
- * PREFERRED_TARGET ioctl: we know it will only support
- * creating one kind of guest CPU which is its preferred
- * CPU type.
- */
- struct kvm_vcpu_init try;
-
- while (*cpus_to_try != QEMU_KVM_ARM_TARGET_NONE) {
- try.target = *cpus_to_try++;
- memcpy(try.features, init->features, sizeof(init->features));
- ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, &try);
- if (ret >= 0) {
- break;
- }
- }
- if (ret < 0) {
- goto err;
- }
- init->target = try.target;
- } else {
- /* Treat a NULL cpus_to_try argument the same as an empty
- * list, which means we will fail the call since this must
- * be an old kernel which doesn't support PREFERRED_TARGET.
- */
+ init->target = preferred.target;
+ }
+ ret = ioctl(cpufd, KVM_ARM_VCPU_INIT, init);
+ if (ret < 0) {
goto err;
}
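
Callers treat the scratch VCPU purely as a probe: create it, read the ID registers of
interest, then tear everything down. A sketch of the usual pattern, modeled on
kvm_arm_sve_get_vls() further down in this file (the probe itself is elided):

    struct kvm_vcpu_init init = { .target = -1, };  /* -1: take KVM_ARM_PREFERRED_TARGET */
    int fdarray[3];                                 /* { kvmfd, vmfd, cpufd } */

    if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
        /* host kernel refused; bail out */
    }
    /* ... probe with ioctl(fdarray[2], KVM_GET_ONE_REG, &reg) ... */
    kvm_arm_destroy_scratch_host_vcpu(fdarray);
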
@@ -242,17 +231,6 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
uint64_t features = 0;
int err;
- /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
- * we know these will only support creating one kind of guest CPU,
- * which is its preferred CPU type. Fortunately these old kernels
- * support only a very limited number of CPUs.
- */
- static const uint32_t cpus_to_try[] = {
- KVM_ARM_TARGET_AEM_V8,
- KVM_ARM_TARGET_FOUNDATION_V8,
- KVM_ARM_TARGET_CORTEX_A57,
- QEMU_KVM_ARM_TARGET_NONE
- };
/*
 * target = -1 informs kvm_arm_create_scratch_host_vcpu()
 * to use the preferred target
 */
@@ -280,9 +258,10 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
if (kvm_arm_pmu_supported()) {
init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
pmu_supported = true;
+ features |= 1ULL << ARM_FEATURE_PMU;
}
- if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
return false;
}
@@ -448,7 +427,6 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
features |= 1ULL << ARM_FEATURE_V8;
features |= 1ULL << ARM_FEATURE_NEON;
features |= 1ULL << ARM_FEATURE_AARCH64;
- features |= 1ULL << ARM_FEATURE_PMU;
features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
ahcf->features = features;
@@ -675,19 +653,11 @@ static void kvm_arm_set_device_addr(KVMDevice *kd)
{
struct kvm_device_attr *attr = &kd->kdattr;
int ret;
+ uint64_t addr = kd->kda.addr;
- /* If the device control API is available and we have a device fd on the
- * KVMDevice struct, let's use the newer API
- */
- if (kd->dev_fd >= 0) {
- uint64_t addr = kd->kda.addr;
-
- addr |= kd->kda_addr_ormask;
- attr->addr = (uintptr_t)&addr;
- ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
- } else {
- ret = kvm_vm_ioctl(kvm_state, KVM_ARM_SET_DEVICE_ADDR, &kd->kda);
- }
+ addr |= kd->kda_addr_ormask;
+ attr->addr = (uintptr_t)&addr;
+ ret = kvm_device_ioctl(kd->dev_fd, KVM_SET_DEVICE_ATTR, attr);
if (ret < 0) {
fprintf(stderr, "Failed to set device address: %s\n",
@@ -968,13 +938,24 @@ void kvm_arm_cpu_pre_save(ARMCPU *cpu)
}
}
-void kvm_arm_cpu_post_load(ARMCPU *cpu)
+bool kvm_arm_cpu_post_load(ARMCPU *cpu)
{
+ if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
+ return false;
+ }
+ /* Note that it's OK for the TCG side not to know about
+ * every register in the list; KVM is authoritative if
+ * we're using it.
+ */
+ write_list_to_cpustate(cpu);
+
/* KVM virtual time adjustment */
if (cpu->kvm_adjvtime) {
cpu->kvm_vtime = *kvm_arm_get_cpreg_ptr(cpu, KVM_REG_ARM_TIMER_CNT);
cpu->kvm_vtime_dirty = true;
}
+
+ return true;
}
void kvm_arm_reset_vcpu(ARMCPU *cpu)
@@ -1793,6 +1774,11 @@ bool kvm_arm_sve_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}
+bool kvm_arm_mte_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
+}
+
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
@@ -1821,7 +1807,7 @@ uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
probed = true;
- if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
+ if (!kvm_arm_create_scratch_host_vcpu(fdarray, &init)) {
error_report("failed to create scratch VCPU with SVE enabled");
abort();
}
@@ -1860,6 +1846,11 @@ static int kvm_arm_sve_set_vls(ARMCPU *cpu)
#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
+int kvm_arch_pre_create_vcpu(CPUState *cpu, Error **errp)
+{
+ return 0;
+}
+
int kvm_arch_init_vcpu(CPUState *cs)
{
int ret;
@@ -1868,8 +1859,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
CPUARMState *env = &cpu->env;
uint64_t psciver;
- if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
- !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE) {
error_report("KVM is not supported for this guest CPU type");
return -EINVAL;
}
@@ -1888,13 +1878,8 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (!arm_feature(env, ARM_FEATURE_AARCH64)) {
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
}
- if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
- cpu->has_pmu = false;
- }
if (cpu->has_pmu) {
cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
- } else {
- env->features &= ~(1ULL << ARM_FEATURE_PMU);
}
if (cpu_isar_feature(aa64_sve, cpu)) {
assert(kvm_arm_sve_supported());
@@ -2047,7 +2032,7 @@ static int kvm_arch_put_sve(CPUState *cs)
return 0;
}
-int kvm_arch_put_registers(CPUState *cs, int level)
+int kvm_arch_put_registers(CPUState *cs, int level, Error **errp)
{
uint64_t val;
uint32_t fpr;
@@ -2231,7 +2216,7 @@ static int kvm_arch_get_sve(CPUState *cs)
return 0;
}
-int kvm_arch_get_registers(CPUState *cs)
+int kvm_arch_get_registers(CPUState *cs, Error **errp)
{
uint64_t val;
unsigned int el;
@@ -2378,7 +2363,7 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
*/
if (code == BUS_MCEERR_AR) {
kvm_cpu_synchronize_state(c);
- if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
+ if (!acpi_ghes_memory_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
kvm_inject_arm_sea(c);
} else {
error_report("failed to record the error");
@@ -2422,3 +2407,69 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
}
return 0;
}
+
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ static bool tried_to_enable;
+ static bool succeeded_to_enable;
+ Error *mte_migration_blocker = NULL;
+ ARMCPU *cpu = ARM_CPU(cpuobj);
+ int ret;
+
+ if (!tried_to_enable) {
+ /*
+ * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
+ * sense), and we only want a single migration blocker as well.
+ */
+ tried_to_enable = true;
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
+ return;
+ }
+
+ /* TODO: Add migration support with MTE enabled */
+ error_setg(&mte_migration_blocker,
+ "Live migration disabled due to MTE enabled");
+ if (migrate_add_blocker(&mte_migration_blocker, errp)) {
+ error_free(mte_migration_blocker);
+ return;
+ }
+
+ succeeded_to_enable = true;
+ }
+
+ if (succeeded_to_enable) {
+ cpu->kvm_mte = true;
+ }
+}
+
+void arm_cpu_kvm_set_irq(void *arm_cpu, int irq, int level)
+{
+ ARMCPU *cpu = arm_cpu;
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t linestate_bit;
+ int irq_id;
+
+ switch (irq) {
+ case ARM_CPU_IRQ:
+ irq_id = KVM_ARM_IRQ_CPU_IRQ;
+ linestate_bit = CPU_INTERRUPT_HARD;
+ break;
+ case ARM_CPU_FIQ:
+ irq_id = KVM_ARM_IRQ_CPU_FIQ;
+ linestate_bit = CPU_INTERRUPT_FIQ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (level) {
+ env->irq_line_state |= linestate_bit;
+ } else {
+ env->irq_line_state &= ~linestate_bit;
+ }
+ kvm_arm_set_irq(cs->cpu_index, KVM_ARM_IRQ_TYPE_CPU, irq_id, !!level);
+}
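
kvm_arm_set_irq() forwards the line change to the kernel's KVM_IRQ_LINE ioctl, whose irq
argument packs the interrupt type, target VCPU index, and line number into a single 32-bit
field. A sketch of that encoding, assuming the field shifts from the arm64 KVM UAPI headers:

    /* Assumed layout: type at bit 24, VCPU index at bit 16, line number in the low bits. */
    struct kvm_irq_level irq_level = {
        .irq = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
               (cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT) |
               irq_id,
        .level = !!level,
    };
    ioctl(vmfd, KVM_IRQ_LINE, &irq_level);  /* vmfd: the VM file descriptor */
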