author     Cornelia Huck <cohuck@redhat.com>          2024-10-29 12:54:40 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2024-10-29 12:54:40 +0000
commit     918d0de0728c3840e3c459202f9bdbcc690a5727
tree       f692572361dbc997f31dc1c1d3d6175598466447
parent     fdf250e5a37830615e324017cb3a503e84b3712c
arm/kvm: add support for MTE
Extend the 'mte' property for the virt machine to cover KVM as well. For
KVM, we don't allocate tag memory, but instead enable the capability.

If MTE has been enabled, we need to disable migration, as we do not yet
have a way to migrate the tags as well. Therefore, MTE will stay off with
KVM unless requested explicitly.

[gankulkarni: This patch is a rework of commit b320e21c48, which broke TCG
since it made the TCG -cpu max report the presence of MTE to the guest
even if the board hadn't enabled MTE by wiring up the tag RAM. This meant
that if the guest then tried to use MTE, QEMU would segfault accessing the
non-existent tag RAM.]

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Gustavo Romero <gustavo.romero@linaro.org>
Signed-off-by: Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Message-id: 20241008114302.4855-1-gankulkarni@os.amperecomputing.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  hw/arm/virt.c        | 72
-rw-r--r--  target/arm/cpu.c     | 14
-rw-r--r--  target/arm/cpu.h     |  2
-rw-r--r--  target/arm/kvm.c     | 58
-rw-r--r--  target/arm/kvm_arm.h | 19
5 files changed, 132 insertions(+), 33 deletions(-)
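At the KVM level, the whole feature hinges on the KVM_CAP_ARM_MTE capability, which must be enabled on the VM before the first VCPU is created so that the MTE bits of ID_AA64PFR1_EL1 probe correctly. As a rough, self-contained sketch of what QEMU's kvm_check_extension()/kvm_vm_enable_cap() helpers boil down to for this capability (illustrative only, not part of the patch; the helper name enable_mte_on_vm is made up):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_mte_on_vm(void)
{
    int kvmfd, vmfd;
    struct kvm_enable_cap cap;

    kvmfd = open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        return -1;
    }
    if (ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_MTE) <= 0) {
        return -1;  /* host kernel does not offer MTE for guests */
    }
    vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    if (vmfd < 0) {
        return -1;
    }
    memset(&cap, 0, sizeof(cap));
    cap.cap = KVM_CAP_ARM_MTE;
    if (ioctl(vmfd, KVM_ENABLE_CAP, &cap) < 0) {
        return -1;
    }
    /* Only now create VCPUs, so ID_AA64PFR1_EL1.MTE probes correctly. */
    return ioctl(vmfd, KVM_CREATE_VCPU, 0);
}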
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 8b2b991..1a381e9 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -2213,7 +2213,7 @@ static void machvirt_init(MachineState *machine)
exit(1);
}
- if (vms->mte && (kvm_enabled() || hvf_enabled())) {
+ if (vms->mte && hvf_enabled()) {
error_report("mach-virt: %s does not support providing "
"MTE to the guest CPU",
current_accel_name());
@@ -2283,39 +2283,51 @@ static void machvirt_init(MachineState *machine)
}
if (vms->mte) {
- /* Create the memory region only once, but link to all cpus. */
- if (!tag_sysmem) {
- /*
- * The property exists only if MemTag is supported.
- * If it is, we must allocate the ram to back that up.
- */
- if (!object_property_find(cpuobj, "tag-memory")) {
- error_report("MTE requested, but not supported "
- "by the guest CPU");
- exit(1);
+ if (tcg_enabled()) {
+ /* Create the memory region only once, but link to all cpus. */
+ if (!tag_sysmem) {
+ /*
+ * The property exists only if MemTag is supported.
+ * If it is, we must allocate the ram to back that up.
+ */
+ if (!object_property_find(cpuobj, "tag-memory")) {
+ error_report("MTE requested, but not supported "
+ "by the guest CPU");
+ exit(1);
+ }
+
+ tag_sysmem = g_new(MemoryRegion, 1);
+ memory_region_init(tag_sysmem, OBJECT(machine),
+ "tag-memory", UINT64_MAX / 32);
+
+ if (vms->secure) {
+ secure_tag_sysmem = g_new(MemoryRegion, 1);
+ memory_region_init(secure_tag_sysmem, OBJECT(machine),
+ "secure-tag-memory",
+ UINT64_MAX / 32);
+
+ /* As with ram, secure-tag takes precedence over tag. */
+ memory_region_add_subregion_overlap(secure_tag_sysmem,
+ 0, tag_sysmem, -1);
+ }
}
- tag_sysmem = g_new(MemoryRegion, 1);
- memory_region_init(tag_sysmem, OBJECT(machine),
- "tag-memory", UINT64_MAX / 32);
-
+ object_property_set_link(cpuobj, "tag-memory",
+ OBJECT(tag_sysmem), &error_abort);
if (vms->secure) {
- secure_tag_sysmem = g_new(MemoryRegion, 1);
- memory_region_init(secure_tag_sysmem, OBJECT(machine),
- "secure-tag-memory", UINT64_MAX / 32);
-
- /* As with ram, secure-tag takes precedence over tag. */
- memory_region_add_subregion_overlap(secure_tag_sysmem, 0,
- tag_sysmem, -1);
+ object_property_set_link(cpuobj, "secure-tag-memory",
+ OBJECT(secure_tag_sysmem),
+ &error_abort);
}
- }
-
- object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem),
- &error_abort);
- if (vms->secure) {
- object_property_set_link(cpuobj, "secure-tag-memory",
- OBJECT(secure_tag_sysmem),
- &error_abort);
+ } else if (kvm_enabled()) {
+ if (!kvm_arm_mte_supported()) {
+ error_report("MTE requested, but not supported by KVM");
+ exit(1);
+ }
+ kvm_arm_enable_mte(cpuobj, &error_abort);
+ } else {
+ error_report("MTE requested, but not supported ");
+ exit(1);
}
}
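The net effect of this board-level change is that MTE is only advertised when the board can actually back it: tag RAM under TCG, the KVM capability under KVM. Either way, what the guest observes is the MTE field of ID_AA64PFR1_EL1. A hypothetical guest-side check (not part of this patch; it would have to run at EL1, e.g. in a guest kernel or bare-metal payload) might look like:

#include <stdint.h>

static inline unsigned int guest_mte_level(void)
{
    uint64_t pfr1;

    /* ID_AA64PFR1_EL1.MTE is bits [11:8]: 0 = none, 1 = EL0-only, >= 2 = full MTE */
    __asm__("mrs %0, ID_AA64PFR1_EL1" : "=r"(pfr1));
    return (pfr1 >> 8) & 0xf;
}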
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 1320fd8..5b75143 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2390,14 +2390,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
#ifndef CONFIG_USER_ONLY
/*
- * If we do not have tag-memory provided by the machine,
- * reduce MTE support to instructions enabled at EL0.
+ * If we run with TCG and do not have tag-memory provided by
+ * the machine, then reduce MTE support to instructions enabled at EL0.
* This matches Cortex-A710 BROADCASTMTE input being LOW.
*/
- if (cpu->tag_memory == NULL) {
+ if (tcg_enabled() && cpu->tag_memory == NULL) {
cpu->isar.id_aa64pfr1 =
FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
}
+
+ /*
+ * If MTE is supported by the host but should not be enabled on the
+ * guest (i.e. mte=off), clear the guest's MTE bits.
+ */
+ if (kvm_enabled() && !cpu->kvm_mte) {
+ cpu->isar.id_aa64pfr1 =
+ FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+ }
#endif
}
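FIELD_DP64() is QEMU's generic bit-field deposit macro: the TCG branch pins ID_AA64PFR1.MTE to 1 (instruction-only, EL0) when no tag memory is wired up, and the KVM branch clears it to 0 when mte=off. Roughly, assuming the architectural field position (a 4-bit field at bits [11:8]), the deposit is equivalent to the following plain-C sketch (illustrative only; the real macro derives shift and width from the FIELD() definitions in cpu.h):

#include <stdint.h>

static inline uint64_t deposit_mte(uint64_t id_aa64pfr1, uint64_t val)
{
    const uint64_t mask = 0xfull << 8;

    return (id_aa64pfr1 & ~mask) | ((val << 8) & mask);
}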
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index f065756..8fc8b63 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -922,6 +922,8 @@ struct ArchCPU {
/* CPU has memory protection unit */
bool has_mpu;
+ /* CPU has MTE enabled in KVM mode */
+ bool kvm_mte;
/* PMSAv7 MPU number of supported regions */
uint32_t pmsav7_dregion;
/* PMSAv8 MPU number of supported hyp regions */
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index f1f1b5b..000afa0 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -39,6 +39,7 @@
#include "hw/acpi/acpi.h"
#include "hw/acpi/ghes.h"
#include "target/arm/gtimer.h"
+#include "migration/blocker.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
@@ -119,6 +120,21 @@ bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
if (vmfd < 0) {
goto err;
}
+
+ /*
+ * The MTE capability must be enabled by the VMM before creating
+ * any VCPUs in order to allow the MTE bits of the ID_AA64PFR1
+ * register to be probed correctly, as they are masked if MTE
+ * is not enabled.
+ */
+ if (kvm_arm_mte_supported()) {
+ KVMState kvm_state;
+
+ kvm_state.fd = kvmfd;
+ kvm_state.vmfd = vmfd;
+ kvm_vm_enable_cap(&kvm_state, KVM_CAP_ARM_MTE, 0);
+ }
+
cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
if (cpufd < 0) {
goto err;
@@ -1793,6 +1809,11 @@ bool kvm_arm_sve_supported(void)
return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}
+bool kvm_arm_mte_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
+}
+
QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
@@ -2417,3 +2438,40 @@ int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
}
return 0;
}
+
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ static bool tried_to_enable;
+ static bool succeeded_to_enable;
+ Error *mte_migration_blocker = NULL;
+ ARMCPU *cpu = ARM_CPU(cpuobj);
+ int ret;
+
+ if (!tried_to_enable) {
+ /*
+ * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
+ * sense), and we only want a single migration blocker as well.
+ */
+ tried_to_enable = true;
+
+ ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0);
+ if (ret) {
+ error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
+ return;
+ }
+
+ /* TODO: Add migration support with MTE enabled */
+ error_setg(&mte_migration_blocker,
+ "Live migration disabled due to MTE enabled");
+ if (migrate_add_blocker(&mte_migration_blocker, errp)) {
+ error_free(mte_migration_blocker);
+ return;
+ }
+
+ succeeded_to_enable = true;
+ }
+
+ if (succeeded_to_enable) {
+ cpu->kvm_mte = true;
+ }
+}
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index cfaa0d9..4d29361 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -189,6 +189,13 @@ bool kvm_arm_pmu_supported(void);
bool kvm_arm_sve_supported(void);
/**
+ * kvm_arm_mte_supported:
+ *
+ * Returns: true if KVM can enable MTE, and false otherwise.
+ */
+bool kvm_arm_mte_supported(void);
+
+/**
* kvm_arm_get_max_vm_ipa_size:
* @ms: Machine state handle
* @fixed_ipa: True when the IPA limit is fixed at 40. This is the case
@@ -214,6 +221,8 @@ void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa);
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
+void kvm_arm_enable_mte(Object *cpuobj, Error **errp);
+
#else
/*
@@ -235,6 +244,11 @@ static inline bool kvm_arm_sve_supported(void)
return false;
}
+static inline bool kvm_arm_mte_supported(void)
+{
+ return false;
+}
+
/*
* These functions should never actually be called without KVM support.
*/
@@ -283,6 +297,11 @@ static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
g_assert_not_reached();
}
+static inline void kvm_arm_enable_mte(Object *cpuobj, Error **errp)
+{
+ g_assert_not_reached();
+}
+
#endif
#endif
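With this applied, MTE under KVM remains opt-in: on a host kernel that exposes KVM_CAP_ARM_MTE, requesting it would look something like -machine virt,mte=on together with -accel kvm -cpu host (illustrative; the rest of the command line depends on the configuration), and any attempt to migrate such a guest is refused by the blocker registered in kvm_arm_enable_mte() until tag migration is implemented.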