author     Richard Henderson <richard.henderson@linaro.org>   2022-06-20 10:52:01 -0700
committer  Peter Maydell <peter.maydell@linaro.org>           2022-06-27 11:18:17 +0100
commit     e74c097638d38b46d9c68f11565432034afc0ad0 (patch)
tree       8f885e504469e1bc251308e66fb0592c4d395868 /target/arm
parent     70cc9ee19e53bc8bc597c5134e294a2ab377c4da (diff)
target/arm: Add cpu properties for SME
Mirror the properties for SVE. The main difference is that any
arbitrary set of powers of 2 may be supported, and not the stricter
constraints that apply to SVE.

Include a property to control FEAT_SME_FA64, as failing to restrict
the runtime to the proper subset of insns could be a major point
for bugs.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20220620175235.60881-18-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
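As a usage sketch (an illustration rather than part of the commit: it assumes
the 'max' CPU model, and the vector lengths actually accepted depend on the
CPU model and accelerator), the new properties combine on the command line
much like their SVE counterparts:

    qemu-system-aarch64 -cpu max,sme=on,sme256=on,sme512=on,sme_fa64=off ...

For user-mode emulation only, the sme-default-vector-length property mirrors
the Linux /proc/sys/abi/sme_default_vector_length setting, in the same way the
existing sve-default-vector-length property does for SVE.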
Diffstat (limited to 'target/arm')
-rw-r--r--   target/arm/cpu.c         14
-rw-r--r--   target/arm/cpu.h          2
-rw-r--r--   target/arm/cpu64.c      114
-rw-r--r--   target/arm/internals.h    1
4 files changed, 124 insertions, 7 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 660fd8b..bb44ad4 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1123,11 +1123,13 @@ static void arm_cpu_initfn(Object *obj)
 #ifdef CONFIG_USER_ONLY
 # ifdef TARGET_AARCH64
     /*
-     * The linux kernel defaults to 512-bit vectors, when sve is supported.
-     * See documentation for /proc/sys/abi/sve_default_vector_length, and
-     * our corresponding sve-default-vector-length cpu property.
+     * The linux kernel defaults to 512-bit for SVE, and 256-bit for SME.
+     * These values were chosen to fit within the default signal frame.
+     * See documentation for /proc/sys/abi/{sve,sme}_default_vector_length,
+     * and our corresponding cpu property.
      */
     cpu->sve_default_vq = 4;
+    cpu->sme_default_vq = 2;
 # endif
 #else
     /* Our inbound IRQ and FIQ lines */
@@ -1430,6 +1432,12 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
             return;
         }
 
+        arm_cpu_sme_finalize(cpu, &local_err);
+        if (local_err != NULL) {
+            error_propagate(errp, local_err);
+            return;
+        }
+
         arm_cpu_pauth_finalize(cpu, &local_err);
         if (local_err != NULL) {
             error_propagate(errp, local_err);
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index d2b005f..c018f97 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1060,9 +1060,11 @@ struct ArchCPU {
 #ifdef CONFIG_USER_ONLY
     /* Used to set the default vector length at process start. */
     uint32_t sve_default_vq;
+    uint32_t sme_default_vq;
 #endif
 
     ARMVQMap sve_vq;
+    ARMVQMap sme_vq;
 
     /* Generic timer counter frequency, in Hz */
     uint64_t gt_cntfrq_hz;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 6f6ee57..19188d6 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -589,10 +589,13 @@ static void cpu_arm_get_vq(Object *obj, Visitor *v, const char *name,
     ARMCPU *cpu = ARM_CPU(obj);
     ARMVQMap *vq_map = opaque;
     uint32_t vq = atoi(&name[3]) / 128;
+    bool sve = vq_map == &cpu->sve_vq;
     bool value;
 
-    /* All vector lengths are disabled when SVE is off. */
-    if (!cpu_isar_feature(aa64_sve, cpu)) {
+    /* All vector lengths are disabled when feature is off. */
+    if (sve
+        ? !cpu_isar_feature(aa64_sve, cpu)
+        : !cpu_isar_feature(aa64_sme, cpu)) {
         value = false;
     } else {
         value = extract32(vq_map->map, vq - 1, 1);
@@ -636,8 +639,80 @@ static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
     cpu->isar.id_aa64pfr0 = t;
 }
 
+void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp)
+{
+    uint32_t vq_map = cpu->sme_vq.map;
+    uint32_t vq_init = cpu->sme_vq.init;
+    uint32_t vq_supported = cpu->sme_vq.supported;
+    uint32_t vq;
+
+    if (vq_map == 0) {
+        if (!cpu_isar_feature(aa64_sme, cpu)) {
+            cpu->isar.id_aa64smfr0 = 0;
+            return;
+        }
+
+        /* TODO: KVM will require limitations via SMCR_EL2. */
+        vq_map = vq_supported & ~vq_init;
+
+        if (vq_map == 0) {
+            vq = ctz32(vq_supported) + 1;
+            error_setg(errp, "cannot disable sme%d", vq * 128);
+            error_append_hint(errp, "All SME vector lengths are disabled.\n");
+            error_append_hint(errp, "With SME enabled, at least one "
+                              "vector length must be enabled.\n");
+            return;
+        }
+    } else {
+        if (!cpu_isar_feature(aa64_sme, cpu)) {
+            vq = 32 - clz32(vq_map);
+            error_setg(errp, "cannot enable sme%d", vq * 128);
+            error_append_hint(errp, "SME must be enabled to enable "
+                              "vector lengths.\n");
+            error_append_hint(errp, "Add sme=on to the CPU property list.\n");
+            return;
+        }
+        /* TODO: KVM will require limitations via SMCR_EL2. */
+    }
+
+    cpu->sme_vq.map = vq_map;
+}
+
+static bool cpu_arm_get_sme(Object *obj, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    return cpu_isar_feature(aa64_sme, cpu);
+}
+
+static void cpu_arm_set_sme(Object *obj, bool value, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    uint64_t t;
+
+    t = cpu->isar.id_aa64pfr1;
+    t = FIELD_DP64(t, ID_AA64PFR1, SME, value);
+    cpu->isar.id_aa64pfr1 = t;
+}
+
+static bool cpu_arm_get_sme_fa64(Object *obj, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    return cpu_isar_feature(aa64_sme, cpu) &&
+           cpu_isar_feature(aa64_sme_fa64, cpu);
+}
+
+static void cpu_arm_set_sme_fa64(Object *obj, bool value, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    uint64_t t;
+
+    t = cpu->isar.id_aa64smfr0;
+    t = FIELD_DP64(t, ID_AA64SMFR0, FA64, value);
+    cpu->isar.id_aa64smfr0 = t;
+}
+
 #ifdef CONFIG_USER_ONLY
-/* Mirror linux /proc/sys/abi/sve_default_vector_length. */
+/* Mirror linux /proc/sys/abi/{sve,sme}_default_vector_length. */
 static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
@@ -663,7 +738,11 @@ static void cpu_arm_set_default_vec_len(Object *obj, Visitor *v,
      * and is the maximum architectural width of ZCR_ELx.LEN.
      */
     if (remainder || default_vq < 1 || default_vq > 512) {
-        error_setg(errp, "cannot set sve-default-vector-length");
+        ARMCPU *cpu = ARM_CPU(obj);
+        const char *which =
+            (ptr_default_vq == &cpu->sve_default_vq ? "sve" : "sme");
+
+        error_setg(errp, "cannot set %s-default-vector-length", which);
         if (remainder) {
             error_append_hint(errp, "Vector length not a multiple of 16\n");
         } else if (default_vq < 1) {
@@ -712,6 +791,31 @@ static void aarch64_add_sve_properties(Object *obj)
 #endif
 }
 
+static void aarch64_add_sme_properties(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    uint32_t vq;
+
+    object_property_add_bool(obj, "sme", cpu_arm_get_sme, cpu_arm_set_sme);
+    object_property_add_bool(obj, "sme_fa64", cpu_arm_get_sme_fa64,
+                             cpu_arm_set_sme_fa64);
+
+    for (vq = 1; vq <= ARM_MAX_VQ; vq <<= 1) {
+        char name[8];
+        sprintf(name, "sme%d", vq * 128);
+        object_property_add(obj, name, "bool", cpu_arm_get_vq,
+                            cpu_arm_set_vq, NULL, &cpu->sme_vq);
+    }
+
+#ifdef CONFIG_USER_ONLY
+    /* Mirror linux /proc/sys/abi/sme_default_vector_length. */
+    object_property_add(obj, "sme-default-vector-length", "int32",
+                        cpu_arm_get_default_vec_len,
+                        cpu_arm_set_default_vec_len, NULL,
+                        &cpu->sme_default_vq);
+#endif
+}
+
 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
 {
     int arch_val = 0, impdef_val = 0;
@@ -977,9 +1081,11 @@ static void aarch64_max_initfn(Object *obj)
 #endif
 
     cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
+    cpu->sme_vq.supported = SVE_VQ_POW2_MAP;
 
     aarch64_add_pauth_properties(obj);
     aarch64_add_sve_properties(obj);
+    aarch64_add_sme_properties(obj);
     object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
                         cpu_max_set_sve_max_vq, NULL, NULL);
     qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
diff --git a/target/arm/internals.h b/target/arm/internals.h
index aef568a..c66f74a 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1289,6 +1289,7 @@ int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg);
 int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg);
 int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg);
 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
+void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
 #endif
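
As a minimal sketch of how the new QOM properties and the finalize hook fit
together (illustrative only: the helper below is hypothetical, it assumes a
CPU model such as 'max' that registers the SME properties, and in real QEMU
arm_cpu_finalize_features() is reached through the CPU realize path rather
than called directly):

    /* Hypothetical example, not part of the patch. */
    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "cpu.h"

    static void example_configure_sme(ARMCPU *cpu)
    {
        Object *obj = OBJECT(cpu);

        /* "sme" toggles ID_AA64PFR1.SME via cpu_arm_set_sme(). */
        object_property_set_bool(obj, "sme", true, &error_fatal);

        /* Request the 256-bit vector length ("sme256" => vq 2). */
        object_property_set_bool(obj, "sme256", true, &error_fatal);

        /* "sme_fa64" controls FEAT_SME_FA64 (ID_AA64SMFR0.FA64). */
        object_property_set_bool(obj, "sme_fa64", false, &error_fatal);

        /*
         * arm_cpu_finalize_features() invokes arm_cpu_sme_finalize(), which
         * resolves the final sme_vq.map and reports errors such as enabling
         * a vector length while sme=off, or disabling every vector length.
         */
        arm_cpu_finalize_features(cpu, &error_fatal);
    }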