author     Stefan Hajnoczi <stefanha@redhat.com>  2023-08-31 08:31:03 -0400
committer  Stefan Hajnoczi <stefanha@redhat.com>  2023-08-31 08:31:03 -0400
commit     c4e5f9a29faadc50fed673d720199db5638fbdab
tree       4edc078271de5b9b77ceedd564afc3202d6fa77d /target
parent     2b0612dea37802ab623e811c63a217a0b0fb9f1a
parent     e73b8bb8a3e9a162f70e9ffbf922d4fafc96bbfb
Merge tag 'pull-target-arm-20230831' of https://git.linaro.org/people/pmaydell/qemu-arm into staging
target-arm queue:
 * Some of the preliminary patches for Cortex-A710 support
 * i.MX7 and i.MX6UL refactoring
 * Implement SRC device for i.MX7
 * Catch illegal-exception-return from EL3 with bad NSE/NS
 * Use 64-bit offsets for holding time_t differences in RTC devices
 * Model correct number of MPU regions for an505, an521, an524 boards

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmTwbukZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3ihBD/wK8Iz0KpTAwZBDAodnSZrh
# tQnJAvYFp8CxA4O8sZ9IeWsZh90gzsTCZi0NqUTTzvWCJfxkB7qTPdlJT5IzVxou
# oEUk2aogSJhRA3XRJzqArXsPlnZGSYDbtwKx4VtfCvOCCH08Y7nhnFaRj1oFnR4Q
# 0PE/8YtGXTBxLHrO8U3tomg7zElzOUP8ZVZtb30BOyw1jtfSD03IZR8dzpA43u1E
# Hh418WvVekmwFoFNh8yUeHzbyXMZufzvbJPuDGJ8pPWwIpvSG6chOnKF8jZll+Ur
# DqOsDkGlQgcBR2QwYfSPClrEkX8yahJ95PBfM6giG+DQC7OiElqXqTiUGZcpgUVo
# uSUbzS4YPsxCnyVV6SBXV+f/8hdXBxOSHTgl7OAFa8X9OwWwspxHJ/v2o/2ibnUT
# hTTkFp/w1nQwVEN8xf1DOUpm/J2Wr8UeH4f776daSrfKAol2BKbHb8dOgGLQCwqb
# G+iDcE4bkzRqly6f+uVk8xSEZDd9P1NYoxKV+gNlV1dTspdHVpTC+rXMa8dRw5hI
# 4KgaAslj++Xa229xkjORXCJ1cICRIebYg7+SjvTtGBYsFV7plsCcYb/R9yLmhVCf
# fKHKKaYe9sQJ82apOIkTc+nnW8BQQx6XUmU/A//iZ8JGLk6DpJcZ8f1m/2rVZTsl
# 9+lsmpBf4w+uR4o+Womhfw==
# =MFh3
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 31 Aug 2023 06:43:53 EDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20230831' of https://git.linaro.org/people/pmaydell/qemu-arm: (24 commits)
  hw/arm: Set number of MPU regions correctly for an505, an521, an524
  hw/arm/armv7m: Add mpu-ns-regions and mpu-s-regions properties
  target/arm: Do all "ARM_FEATURE_X implies Y" checks in post_init
  rtc: Use time_t for passing and returning time offsets
  hw/rtc/aspeed_rtc: Use 64-bit offset for holding time_t difference
  hw/rtc/twl92230: Use int64_t for sec_offset and alm_sec
  hw/rtc/m48t59: Use 64-bit arithmetic in set_alarm()
  target/arm: Catch illegal-exception-return from EL3 with bad NSE/NS
  Add i.MX7 SRC device implementation
  Add i.MX7 missing TZ devices and memory regions
  Refactor i.MX7 processor code
  Add i.MX6UL missing devices.
  Refactor i.MX6UL processor code
  Remove i.MX7 IOMUX GPR device from i.MX6UL
  target/arm: properly document FEAT_CRC32
  target/arm: Implement FEAT_HPDS2 as a no-op
  target/arm: Suppress FEAT_TRBE (Trace Buffer Extension)
  target/arm: Apply access checks to neoverse-v1 special registers
  target/arm: Apply access checks to neoverse-n1 special registers
  target/arm: Introduce make_ccsidr64
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpregs.h               2
-rw-r--r--  target/arm/cpu.c                205
-rw-r--r--  target/arm/cpu.h                  5
-rw-r--r--  target/arm/helper.c              15
-rw-r--r--  target/arm/internals.h            6
-rw-r--r--  target/arm/tcg/cpu32.c            2
-rw-r--r--  target/arm/tcg/cpu64.c          102
-rw-r--r--  target/arm/tcg/helper-a64.c       9
-rw-r--r--  target/arm/tcg/mte_helper.c      90
-rw-r--r--  target/arm/tcg/translate-a64.c    5
-rw-r--r--  target/arm/tcg/translate.h        2
11 files changed, 293 insertions, 150 deletions
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index 1478568..f1293d1 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -1077,4 +1077,6 @@ static inline void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu) { }
void define_cortex_a72_a57_a53_cp_reginfo(ARMCPU *cpu);
#endif
+CPAccessResult access_tvm_trvm(CPUARMState *, const ARMCPRegInfo *, bool);
+
#endif /* TARGET_ARM_CPREGS_H */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index d906d2b..0bb0585 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1356,17 +1356,108 @@ unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
}
+static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
+{
+ CPUARMState *env = &cpu->env;
+ bool no_aa32 = false;
+
+ /*
+ * Some features automatically imply others: set the feature
+ * bits explicitly for these cases.
+ */
+
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_PMSA);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_V7);
+ } else {
+ set_feature(env, ARM_FEATURE_V7VE);
+ }
+ }
+
+ /*
+ * There exist AArch64 cpus without AArch32 support. When KVM
+ * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
+ * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
+ * As a general principle, we also do not make ID register
+ * consistency checks anywhere unless using TCG, because only
+ * for TCG would a consistency-check failure be a QEMU bug.
+ */
+ if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_V7VE)) {
+ /*
+ * v7 Virtualization Extensions. In real hardware this implies
+ * EL2 and also the presence of the Security Extensions.
+ * For QEMU, for backwards-compatibility we implement some
+ * CPUs or CPU configs which have no actual EL2 or EL3 but do
+ * include the various other features that V7VE implies.
+ * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
+ * Security Extensions is ARM_FEATURE_EL3.
+ */
+ assert(!tcg_enabled() || no_aa32 ||
+ cpu_isar_feature(aa32_arm_div, cpu));
+ set_feature(env, ARM_FEATURE_LPAE);
+ set_feature(env, ARM_FEATURE_V7);
+ }
+ if (arm_feature(env, ARM_FEATURE_V7)) {
+ set_feature(env, ARM_FEATURE_VAPA);
+ set_feature(env, ARM_FEATURE_THUMB2);
+ set_feature(env, ARM_FEATURE_MPIDR);
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_V6K);
+ } else {
+ set_feature(env, ARM_FEATURE_V6);
+ }
+
+ /*
+ * Always define VBAR for V7 CPUs even if it doesn't exist in
+ * non-EL3 configs. This is needed by some legacy boards.
+ */
+ set_feature(env, ARM_FEATURE_VBAR);
+ }
+ if (arm_feature(env, ARM_FEATURE_V6K)) {
+ set_feature(env, ARM_FEATURE_V6);
+ set_feature(env, ARM_FEATURE_MVFR);
+ }
+ if (arm_feature(env, ARM_FEATURE_V6)) {
+ set_feature(env, ARM_FEATURE_V5);
+ if (!arm_feature(env, ARM_FEATURE_M)) {
+ assert(!tcg_enabled() || no_aa32 ||
+ cpu_isar_feature(aa32_jazelle, cpu));
+ set_feature(env, ARM_FEATURE_AUXCR);
+ }
+ }
+ if (arm_feature(env, ARM_FEATURE_V5)) {
+ set_feature(env, ARM_FEATURE_V4T);
+ }
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ set_feature(env, ARM_FEATURE_V7MP);
+ }
+ if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
+ set_feature(env, ARM_FEATURE_CBAR);
+ }
+ if (arm_feature(env, ARM_FEATURE_THUMB2) &&
+ !arm_feature(env, ARM_FEATURE_M)) {
+ set_feature(env, ARM_FEATURE_THUMB_DSP);
+ }
+}
+
void arm_cpu_post_init(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
- /* M profile implies PMSA. We have to do this here rather than
- * in realize with the other feature-implication checks because
- * we look at the PMSA bit to see if we should add some properties.
+ /*
+ * Some features imply others. Figure this out now, because we
+ * are going to look at the feature bits in deciding which
+ * properties to add.
*/
- if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
- set_feature(&cpu->env, ARM_FEATURE_PMSA);
- }
+ arm_cpu_propagate_feature_implications(cpu);
if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
@@ -1588,7 +1679,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
CPUARMState *env = &cpu->env;
int pagebits;
Error *local_err = NULL;
- bool no_aa32 = false;
/* Use pc-relative instructions in system-mode */
#ifndef CONFIG_USER_ONLY
@@ -1869,81 +1959,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
cpu->isar.id_isar3 = u;
}
- /* Some features automatically imply others: */
- if (arm_feature(env, ARM_FEATURE_V8)) {
- if (arm_feature(env, ARM_FEATURE_M)) {
- set_feature(env, ARM_FEATURE_V7);
- } else {
- set_feature(env, ARM_FEATURE_V7VE);
- }
- }
-
- /*
- * There exist AArch64 cpus without AArch32 support. When KVM
- * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
- * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
- * As a general principle, we also do not make ID register
- * consistency checks anywhere unless using TCG, because only
- * for TCG would a consistency-check failure be a QEMU bug.
- */
- if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
- }
-
- if (arm_feature(env, ARM_FEATURE_V7VE)) {
- /* v7 Virtualization Extensions. In real hardware this implies
- * EL2 and also the presence of the Security Extensions.
- * For QEMU, for backwards-compatibility we implement some
- * CPUs or CPU configs which have no actual EL2 or EL3 but do
- * include the various other features that V7VE implies.
- * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
- * Security Extensions is ARM_FEATURE_EL3.
- */
- assert(!tcg_enabled() || no_aa32 ||
- cpu_isar_feature(aa32_arm_div, cpu));
- set_feature(env, ARM_FEATURE_LPAE);
- set_feature(env, ARM_FEATURE_V7);
- }
- if (arm_feature(env, ARM_FEATURE_V7)) {
- set_feature(env, ARM_FEATURE_VAPA);
- set_feature(env, ARM_FEATURE_THUMB2);
- set_feature(env, ARM_FEATURE_MPIDR);
- if (!arm_feature(env, ARM_FEATURE_M)) {
- set_feature(env, ARM_FEATURE_V6K);
- } else {
- set_feature(env, ARM_FEATURE_V6);
- }
-
- /* Always define VBAR for V7 CPUs even if it doesn't exist in
- * non-EL3 configs. This is needed by some legacy boards.
- */
- set_feature(env, ARM_FEATURE_VBAR);
- }
- if (arm_feature(env, ARM_FEATURE_V6K)) {
- set_feature(env, ARM_FEATURE_V6);
- set_feature(env, ARM_FEATURE_MVFR);
- }
- if (arm_feature(env, ARM_FEATURE_V6)) {
- set_feature(env, ARM_FEATURE_V5);
- if (!arm_feature(env, ARM_FEATURE_M)) {
- assert(!tcg_enabled() || no_aa32 ||
- cpu_isar_feature(aa32_jazelle, cpu));
- set_feature(env, ARM_FEATURE_AUXCR);
- }
- }
- if (arm_feature(env, ARM_FEATURE_V5)) {
- set_feature(env, ARM_FEATURE_V4T);
- }
- if (arm_feature(env, ARM_FEATURE_LPAE)) {
- set_feature(env, ARM_FEATURE_V7MP);
- }
- if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
- set_feature(env, ARM_FEATURE_CBAR);
- }
- if (arm_feature(env, ARM_FEATURE_THUMB2) &&
- !arm_feature(env, ARM_FEATURE_M)) {
- set_feature(env, ARM_FEATURE_THUMB_DSP);
- }
/*
* We rely on no XScale CPU having VFP so we can use the same bits in the
@@ -2056,16 +2071,27 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
ID_PFR1, VIRTUALIZATION, 0);
}
+ if (cpu_isar_feature(aa64_mte, cpu)) {
+ /*
+ * The architectural range of GM blocksize is 2-6, however qemu
+ * doesn't support blocksize of 2 (see HELPER(ldgm)).
+ */
+ if (tcg_enabled()) {
+ assert(cpu->gm_blocksize >= 3 && cpu->gm_blocksize <= 6);
+ }
+
#ifndef CONFIG_USER_ONLY
- if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
/*
- * Disable the MTE feature bits if we do not have tag-memory
- * provided by the machine.
+ * If we do not have tag-memory provided by the machine,
+ * reduce MTE support to instructions enabled at EL0.
+ * This matches Cortex-A710 BROADCASTMTE input being LOW.
*/
- cpu->isar.id_aa64pfr1 =
- FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
- }
+ if (cpu->tag_memory == NULL) {
+ cpu->isar.id_aa64pfr1 =
+ FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 1);
+ }
#endif
+ }
if (tcg_enabled()) {
/*
@@ -2077,6 +2103,9 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
/* FEAT_SPE (Statistical Profiling Extension) */
cpu->isar.id_aa64dfr0 =
FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMSVER, 0);
+ /* FEAT_TRBE (Trace Buffer Extension) */
+ cpu->isar.id_aa64dfr0 =
+ FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEBUFFER, 0);
/* FEAT_TRF (Self-hosted Trace Extension) */
cpu->isar.id_aa64dfr0 =
FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, TRACEFILT, 0);
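The new arm_cpu_propagate_feature_implications() encodes a transitive chain: V8 implies V7VE (or V7 on M-profile), V7VE implies V7, and so on down to V4T. A minimal sketch of just the architecture-version part of that fan-out, using illustrative feature bits rather than QEMU's ARM_FEATURE_* constants or CPUARMState:

    #include <stdio.h>

    enum { F_V4T, F_V5, F_V6, F_V6K, F_V7, F_V7VE, F_V8, F_M };

    static unsigned feats;
    static void set_feature(int f) { feats |= 1u << f; }
    static int  has_feature(int f) { return (feats >> f) & 1; }

    static void propagate(void)
    {
        if (has_feature(F_V8)) {
            set_feature(has_feature(F_M) ? F_V7 : F_V7VE);
        }
        if (has_feature(F_V7VE)) set_feature(F_V7);
        if (has_feature(F_V7))   set_feature(has_feature(F_M) ? F_V6 : F_V6K);
        if (has_feature(F_V6K))  set_feature(F_V6);
        if (has_feature(F_V6))   set_feature(F_V5);
        if (has_feature(F_V5))   set_feature(F_V4T);
    }

    int main(void)
    {
        set_feature(F_V8);                     /* an A-profile v8 CPU */
        propagate();
        printf("feature mask: 0x%x\n", feats); /* 0x7f: V4T..V8 all set */
        return 0;
    }

For an A-profile v8 CPU the single V8 bit leaves the whole V4T..V8 chain set, which is why realize no longer needs to repeat these checks.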
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index cdf8600..278cc13 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1074,7 +1074,10 @@ struct ArchCPU {
bool prop_lpa2;
/* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
- uint32_t dcz_blocksize;
+ uint8_t dcz_blocksize;
+ /* GM blocksize, in log_2(words), ie low 4 bits of GMID_EL0 */
+ uint8_t gm_blocksize;
+
uint64_t rvbar_prop; /* Property/input signals. */
/* Configurable aspects of GIC cpu interface (which is part of the CPU) */
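Both blocksize fields share the DCZID/GMID encoding: log2 of the size in 32-bit words, so the byte size is 4 << field. A quick consistency check against the values this series uses for -cpu max (a plain C sketch):

    #include <assert.h>

    int main(void)
    {
        /* byte size = 4 << field, since the field is log2(words)
         * and each word is 4 bytes */
        unsigned dcz_blocksize = 7;           /* -cpu max: DC ZVA block */
        unsigned gm_blocksize = 6;            /* -cpu max: LDGM/STGM block */

        assert((4u << dcz_blocksize) == 512); /* 512-byte zero block */
        assert((4u << gm_blocksize) == 256);  /* 256 bytes = 16 tag granules */
        return 0;
    }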
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 85291d5..e3f5a7d 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -319,8 +319,8 @@ static CPAccessResult access_tpm(CPUARMState *env, const ARMCPRegInfo *ri,
}
/* Check for traps from EL1 due to HCR_EL2.TVM and HCR_EL2.TRVM. */
-static CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
- bool isread)
+CPAccessResult access_tvm_trvm(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
{
if (arm_current_el(env) == 1) {
uint64_t trap = isread ? HCR_TRVM : HCR_TVM;
@@ -7748,10 +7748,6 @@ static const ARMCPRegInfo mte_reginfo[] = {
.opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 6,
.access = PL1_RW, .accessfn = access_mte,
.fieldoffset = offsetof(CPUARMState, cp15.gcr_el1) },
- { .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
- .access = PL1_R, .accessfn = access_aa64_tid5,
- .type = ARM_CP_CONST, .resetvalue = GMID_EL1_BS },
{ .name = "TCO", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 3, .crn = 4, .crm = 2, .opc2 = 7,
.type = ARM_CP_NO_RAW,
@@ -9342,6 +9338,13 @@ void register_cp_regs_for_features(ARMCPU *cpu)
* then define only a RAZ/WI version of PSTATE.TCO.
*/
if (cpu_isar_feature(aa64_mte, cpu)) {
+ ARMCPRegInfo gmid_reginfo = {
+ .name = "GMID_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 4,
+ .access = PL1_R, .accessfn = access_aa64_tid5,
+ .type = ARM_CP_CONST, .resetvalue = cpu->gm_blocksize,
+ };
+ define_one_arm_cp_reg(cpu, &gmid_reginfo);
define_arm_cp_regs(cpu, mte_reginfo);
define_arm_cp_regs(cpu, mte_el0_cacheop_reginfo);
} else if (cpu_isar_feature(aa64_mte_insn_reg, cpu)) {
diff --git a/target/arm/internals.h b/target/arm/internals.h
index cf13bb9..5f5393b 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1247,12 +1247,6 @@ void arm_log_exception(CPUState *cs);
#endif /* !CONFIG_USER_ONLY */
/*
- * The log2 of the words in the tag block, for GMID_EL1.BS.
- * The is the maximum, 256 bytes, which manipulates 64-bits of tags.
- */
-#define GMID_EL1_BS 6
-
-/*
* SVE predicates are 1/8 the size of SVE vectors, and cannot use
* the same simd_desc() encoding due to restrictions on size.
* Use these instead.
diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c
index 47d2e8e..1f918ff 100644
--- a/target/arm/tcg/cpu32.c
+++ b/target/arm/tcg/cpu32.c
@@ -62,7 +62,7 @@ void aa32_max_features(ARMCPU *cpu)
cpu->isar.id_mmfr3 = t;
t = cpu->isar.id_mmfr4;
- t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* FEAT_AA32HPD */
+ t = FIELD_DP32(t, ID_MMFR4, HPDS, 2); /* FEAT_HPDS2 */
t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */
t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */
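FIELD_DP32() deposits a value into a named register field; raising HPDS from 1 to 2 advertises FEAT_HPDS2 on top of FEAT_AA32HPD. A hedged sketch of the underlying deposit, with the ID_MMFR4.HPDS position (bits [19:16]) written out by hand rather than taken from QEMU's FIELD() definitions:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t deposit_field(uint32_t reg, unsigned shift,
                                  unsigned len, uint32_t val)
    {
        uint32_t mask = ((1u << len) - 1) << shift;
        return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
        uint32_t t = 0;
        t = deposit_field(t, 16, 4, 2);   /* HPDS = 2 -> FEAT_HPDS2 */
        assert(((t >> 16) & 0xf) == 2);
        return 0;
    }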
diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c
index 8019f00..0f89729 100644
--- a/target/arm/tcg/cpu64.c
+++ b/target/arm/tcg/cpu64.c
@@ -24,9 +24,36 @@
#include "qemu/module.h"
#include "qapi/visitor.h"
#include "hw/qdev-properties.h"
+#include "qemu/units.h"
#include "internals.h"
#include "cpregs.h"
+static uint64_t make_ccsidr64(unsigned assoc, unsigned linesize,
+ unsigned cachesize)
+{
+ unsigned lg_linesize = ctz32(linesize);
+ unsigned sets;
+
+ /*
+ * The 64-bit CCSIDR_EL1 format is:
+ * [55:32] number of sets - 1
+ * [23:3] associativity - 1
+ * [2:0] log2(linesize) - 4
+ * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc
+ */
+ assert(assoc != 0);
+ assert(is_power_of_2(linesize));
+ assert(lg_linesize >= 4 && lg_linesize <= 7 + 4);
+
+ /* sets * associativity * linesize == cachesize. */
+ sets = cachesize / (assoc * linesize);
+ assert(cachesize % (assoc * linesize) == 0);
+
+ return ((uint64_t)(sets - 1) << 32)
+ | ((assoc - 1) << 3)
+ | (lg_linesize - 4);
+}
+
static void aarch64_a35_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
@@ -436,10 +463,30 @@ static void aarch64_a64fx_initfn(Object *obj)
/* TODO: Add A64FX specific HPC extension registers */
}
+static CPAccessResult access_actlr_w(CPUARMState *env, const ARMCPRegInfo *r,
+ bool read)
+{
+ if (!read) {
+ int el = arm_current_el(env);
+
+ /* Because ACTLR_EL2 is constant 0, writes below EL2 trap to EL2. */
+ if (el < 2 && arm_is_el2_enabled(env)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ /* Because ACTLR_EL3 is constant 0, writes below EL3 trap to EL3. */
+ if (el < 3 && arm_feature(env, ARM_FEATURE_EL3)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ }
+ return CP_ACCESS_OK;
+}
+
static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
{ .name = "ATCR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 7, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ /* Traps and enables are the same as for TCR_EL1. */
+ .accessfn = access_tvm_trvm, .fgt = FGT_TCR_EL1, },
{ .name = "ATCR_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 15, .crm = 7, .opc2 = 0,
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
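Spelled out, access_actlr_w() only constrains writes, and the EL2 trap is checked before the EL3 one. A model of its decision table with plain parameters standing in for the CPUARMState queries (a sketch, not the QEMU helper itself):

    /* Write-access outcomes for access_actlr_w(); reads always succeed.
     * el2_enabled / have_el3 stand in for arm_is_el2_enabled() and
     * arm_feature(env, ARM_FEATURE_EL3). */
    typedef enum { ACCESS_OK, ACCESS_TRAP_EL2, ACCESS_TRAP_EL3 } AccessRes;

    static AccessRes actlr_write_result(int el, int el2_enabled, int have_el3)
    {
        if (el < 2 && el2_enabled) {
            return ACCESS_TRAP_EL2;   /* EL2 trap wins below EL2 */
        }
        if (el < 3 && have_el3) {
            return ACCESS_TRAP_EL3;
        }
        return ACCESS_OK;
    }
    /* actlr_write_result(1, 1, 1) == ACCESS_TRAP_EL2
     * actlr_write_result(2, 1, 1) == ACCESS_TRAP_EL3
     * actlr_write_result(3, 1, 1) == ACCESS_OK */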
@@ -454,13 +501,16 @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
.access = PL2_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "CPUACTLR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = access_actlr_w },
{ .name = "CPUACTLR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 1,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = access_actlr_w },
{ .name = "CPUACTLR3_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = access_actlr_w },
/*
* Report CPUCFR_EL1.SCU as 1, as we do not implement the DSU
* (and in particular its system registers).
@@ -470,7 +520,8 @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
.access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 4 },
{ .name = "CPUECTLR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 4,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0x961563010,
+ .accessfn = access_actlr_w },
{ .name = "CPUPCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 15, .crm = 8, .opc2 = 1,
.access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
@@ -485,16 +536,20 @@ static const ARMCPRegInfo neoverse_n1_cp_reginfo[] = {
.access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
{ .name = "CPUPWRCTLR_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 7,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = access_actlr_w },
{ .name = "ERXPFGCDN_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 2,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = access_actlr_w },
{ .name = "ERXPFGCTL_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 1,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = access_actlr_w },
{ .name = "ERXPFGF_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 2, .opc2 = 0,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = access_actlr_w },
};
static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu)
@@ -505,7 +560,8 @@ static void define_neoverse_n1_cp_reginfo(ARMCPU *cpu)
static const ARMCPRegInfo neoverse_v1_cp_reginfo[] = {
{ .name = "CPUECTLR2_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 15, .crm = 1, .opc2 = 5,
- .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .accessfn = access_actlr_w },
{ .name = "CPUPPMCR_EL3", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 6, .crn = 15, .crm = 2, .opc2 = 0,
.access = PL3_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
@@ -651,26 +707,15 @@ static void aarch64_neoverse_v1_initfn(Object *obj)
* The Neoverse-V1 r1p2 TRM lists 32-bit format CCSIDR_EL1 values,
* but also says it implements CCIDX, which means they should be
* 64-bit format. So we here use values which are based on the textual
- * information in chapter 2 of the TRM (and on the fact that
- * sets * associativity * linesize == cachesize).
- *
- * The 64-bit CCSIDR_EL1 format is:
- * [55:32] number of sets - 1
- * [23:3] associativity - 1
- * [2:0] log2(linesize) - 4
- * so 0 == 16 bytes, 1 == 32 bytes, 2 == 64 bytes, etc
- *
- * L1: 4-way set associative 64-byte line size, total size 64K,
- * so sets is 256.
+ * information in chapter 2 of the TRM:
*
+ * L1: 4-way set associative 64-byte line size, total size 64K.
* L2: 8-way set associative, 64 byte line size, either 512K or 1MB.
- * We pick 1MB, so this has 2048 sets.
- *
* L3: No L3 (this matches the CLIDR_EL1 value).
*/
- cpu->ccsidr[0] = 0x000000ff0000001aull; /* 64KB L1 dcache */
- cpu->ccsidr[1] = 0x000000ff0000001aull; /* 64KB L1 icache */
- cpu->ccsidr[2] = 0x000007ff0000003aull; /* 1MB L2 cache */
+ cpu->ccsidr[0] = make_ccsidr64(4, 64, 64 * KiB); /* L1 dcache */
+ cpu->ccsidr[1] = cpu->ccsidr[0]; /* L1 icache */
+ cpu->ccsidr[2] = make_ccsidr64(8, 64, 1 * MiB); /* L2 cache */
/* From 3.2.115 SCTLR_EL3 */
cpu->reset_sctlr = 0x30c50838;
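Working the L1 case through make_ccsidr64() reproduces the constant this hunk deletes: linesize 64 gives lg_linesize 6, sets = 64 KiB / (4 * 64) = 256, so the encoding is (255 << 32) | (3 << 3) | 2 = 0x000000ff0000001a; the 1 MiB L2 likewise yields 0x000007ff0000003a. A standalone check (sketch; sizes spelled out rather than taken from qemu/units.h, and a GCC-style __builtin_ctz assumed):

    #include <assert.h>
    #include <stdint.h>

    /* Same encoding as make_ccsidr64() in this patch:
     * [55:32] sets - 1, [23:3] assoc - 1, [2:0] log2(linesize) - 4. */
    static uint64_t ccsidr64(unsigned assoc, unsigned linesize,
                             unsigned cachesize)
    {
        unsigned lg_linesize = __builtin_ctz(linesize);
        unsigned sets = cachesize / (assoc * linesize);
        return ((uint64_t)(sets - 1) << 32) | ((assoc - 1) << 3)
               | (lg_linesize - 4);
    }

    int main(void)
    {
        assert(ccsidr64(4, 64, 64 * 1024) == 0x000000ff0000001aull);   /* L1 */
        assert(ccsidr64(8, 64, 1024 * 1024) == 0x000007ff0000003aull); /* L2 */
        return 0;
    }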
@@ -743,7 +788,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */
- t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1);
+ t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); /* FEAT_CRC32 */
t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */
t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */
@@ -807,7 +852,7 @@ void aarch64_max_tcg_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, HAFDBS, 2); /* FEAT_HAFDBS */
t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* FEAT_VMID16 */
t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); /* FEAT_VHE */
- t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* FEAT_HPDS */
+ t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 2); /* FEAT_HPDS2 */
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */
t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 3); /* FEAT_PAN3 */
t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
@@ -868,6 +913,7 @@ void aarch64_max_tcg_initfn(Object *obj)
cpu->ctr = 0x80038003; /* 32 byte I and D cacheline size, VIPT icache */
cpu->dcz_blocksize = 7; /* 512 bytes */
#endif
+ cpu->gm_blocksize = 6; /* 256 bytes */
cpu->sve_vq.supported = MAKE_64BIT_MASK(0, ARM_MAX_VQ);
cpu->sme_vq.supported = SVE_VQ_POW2_MAP;
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 1c9370f..0cf56f6 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -780,6 +780,15 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
spsr &= ~PSTATE_SS;
}
+ /*
+ * FEAT_RME forbids return from EL3 with an invalid security state.
+ * We don't need an explicit check for FEAT_RME here because we enforce
+ * in scr_write() that you can't set the NSE bit without it.
+ */
+ if (cur_el == 3 && (env->cp15.scr_el3 & (SCR_NS | SCR_NSE)) == SCR_NSE) {
+ goto illegal_return;
+ }
+
new_el = el_from_spsr(spsr);
if (new_el == -1) {
goto illegal_return;
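The check implements the FEAT_RME rule that SCR_EL3.{NSE,NS} = {1,0} is the one reserved encoding ({0,0} Secure, {0,1} Non-secure, {1,1} Realm). A sketch of the predicate with the architectural bit positions (NS is bit 0, NSE is bit 62) written out:

    #include <stdbool.h>
    #include <stdint.h>

    #define SCR_NS  (1ull << 0)     /* SCR_EL3.NS */
    #define SCR_NSE (1ull << 62)    /* SCR_EL3.NSE (FEAT_RME) */

    /* {NSE,NS}: {0,0} Secure, {0,1} Non-secure, {1,1} Realm,
     * {1,0} reserved -> exception return from EL3 is illegal. */
    static bool security_state_valid(uint64_t scr_el3)
    {
        return (scr_el3 & (SCR_NS | SCR_NSE)) != SCR_NSE;
    }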
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 9c64def..b23d115 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -421,46 +421,82 @@ void HELPER(st2g_stub)(CPUARMState *env, uint64_t ptr)
}
}
-#define LDGM_STGM_SIZE (4 << GMID_EL1_BS)
-
uint64_t HELPER(ldgm)(CPUARMState *env, uint64_t ptr)
{
int mmu_idx = cpu_mmu_index(env, false);
uintptr_t ra = GETPC();
+ int gm_bs = env_archcpu(env)->gm_blocksize;
+ int gm_bs_bytes = 4 << gm_bs;
void *tag_mem;
+ uint64_t ret;
+ int shift;
- ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);
+ ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);
/* Trap if accessing an invalid page. */
tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_LOAD,
- LDGM_STGM_SIZE, MMU_DATA_LOAD,
- LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);
+ gm_bs_bytes, MMU_DATA_LOAD,
+ gm_bs_bytes / (2 * TAG_GRANULE), ra);
/* The tag is squashed to zero if the page does not support tags. */
if (!tag_mem) {
return 0;
}
- QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
/*
- * We are loading 64-bits worth of tags. The ordering of elements
- * within the word corresponds to a 64-bit little-endian operation.
+ * The ordering of elements within the word corresponds to
+ * a little-endian operation. Computation of shift comes from
+ *
+ * index = address<LOG2_TAG_GRANULE+3:LOG2_TAG_GRANULE>
+ * data<index*4+3:index*4> = tag
+ *
+ * Because of the alignment of ptr above, BS=6 has shift=0.
+ * All memory operations are aligned. Defer support for BS=2,
+ * requiring insertion or extraction of a nibble, until we
+ * support a cpu that requires it.
*/
- return ldq_le_p(tag_mem);
+ switch (gm_bs) {
+ case 3:
+ /* 32 bytes -> 2 tags -> 8 result bits */
+ ret = *(uint8_t *)tag_mem;
+ break;
+ case 4:
+ /* 64 bytes -> 4 tags -> 16 result bits */
+ ret = cpu_to_le16(*(uint16_t *)tag_mem);
+ break;
+ case 5:
+ /* 128 bytes -> 8 tags -> 32 result bits */
+ ret = cpu_to_le32(*(uint32_t *)tag_mem);
+ break;
+ case 6:
+ /* 256 bytes -> 16 tags -> 64 result bits */
+ return cpu_to_le64(*(uint64_t *)tag_mem);
+ default:
+ /*
+ * CPU configured with unsupported/invalid gm blocksize.
+ * This is detected early in arm_cpu_realizefn.
+ */
+ g_assert_not_reached();
+ }
+ shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
+ return ret << shift;
}
void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
{
int mmu_idx = cpu_mmu_index(env, false);
uintptr_t ra = GETPC();
+ int gm_bs = env_archcpu(env)->gm_blocksize;
+ int gm_bs_bytes = 4 << gm_bs;
void *tag_mem;
+ int shift;
- ptr = QEMU_ALIGN_DOWN(ptr, LDGM_STGM_SIZE);
+ ptr = QEMU_ALIGN_DOWN(ptr, gm_bs_bytes);
/* Trap if accessing an invalid page. */
tag_mem = allocation_tag_mem(env, mmu_idx, ptr, MMU_DATA_STORE,
- LDGM_STGM_SIZE, MMU_DATA_LOAD,
- LDGM_STGM_SIZE / (2 * TAG_GRANULE), ra);
+ gm_bs_bytes, MMU_DATA_LOAD,
+ gm_bs_bytes / (2 * TAG_GRANULE), ra);
/*
* Tag store only happens if the page support tags,
@@ -470,12 +506,30 @@ void HELPER(stgm)(CPUARMState *env, uint64_t ptr, uint64_t val)
return;
}
- QEMU_BUILD_BUG_ON(GMID_EL1_BS != 6);
- /*
- * We are storing 64-bits worth of tags. The ordering of elements
- * within the word corresponds to a 64-bit little-endian operation.
- */
- stq_le_p(tag_mem, val);
+ /* See LDGM for comments on BS and on shift. */
+ shift = extract64(ptr, LOG2_TAG_GRANULE, 4) * 4;
+ val >>= shift;
+ switch (gm_bs) {
+ case 3:
+ /* 32 bytes -> 2 tags -> 8 result bits */
+ *(uint8_t *)tag_mem = val;
+ break;
+ case 4:
+ /* 64 bytes -> 4 tags -> 16 result bits */
+ *(uint16_t *)tag_mem = cpu_to_le16(val);
+ break;
+ case 5:
+ /* 128 bytes -> 8 tags -> 32 result bits */
+ *(uint32_t *)tag_mem = cpu_to_le32(val);
+ break;
+ case 6:
+ /* 256 bytes -> 16 tags -> 64 result bits */
+ *(uint64_t *)tag_mem = cpu_to_le64(val);
+ break;
+ default:
+ /* cpu configured with unsupported gm blocksize. */
+ g_assert_not_reached();
+ }
}
void HELPER(stzgm_tags)(CPUARMState *env, uint64_t ptr, uint64_t val)
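A concrete instance of the shift computation shared by ldgm and stgm: with gm_bs = 4 the operation covers 64 bytes (4 tags, 16 result bits), and an aligned pointer at offset 0x40 within its naturally aligned 256-byte region has ptr[7:4] = 4, so the 16 bits land at bit 16 of the result register. A sketch, assuming the usual 16-byte tag granule (LOG2_TAG_GRANULE = 4):

    #include <assert.h>
    #include <stdint.h>

    #define LOG2_TAG_GRANULE 4    /* 16-byte MTE tag granule */

    /* extract64(ptr, pos, len) equivalent */
    static uint64_t xtract(uint64_t x, int pos, int len)
    {
        return (x >> pos) & ((1ull << len) - 1);
    }

    int main(void)
    {
        uint64_t ptr = 0x1040;    /* already aligned to the 64-byte block */
        int shift = xtract(ptr, LOG2_TAG_GRANULE, 4) * 4;

        assert(shift == 16);      /* ptr[7:4] = 4 -> result bits [31:16] */
        return 0;
    }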
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index da686cc..0b77c92 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -3786,7 +3786,7 @@ static bool trans_STGM(DisasContext *s, arg_ldst_tag *a)
gen_helper_stgm(cpu_env, addr, tcg_rt);
} else {
MMUAccessType acc = MMU_DATA_STORE;
- int size = 4 << GMID_EL1_BS;
+ int size = 4 << s->gm_blocksize;
clean_addr = clean_data_tbi(s, addr);
tcg_gen_andi_i64(clean_addr, clean_addr, -size);
@@ -3818,7 +3818,7 @@ static bool trans_LDGM(DisasContext *s, arg_ldst_tag *a)
gen_helper_ldgm(tcg_rt, cpu_env, addr);
} else {
MMUAccessType acc = MMU_DATA_LOAD;
- int size = 4 << GMID_EL1_BS;
+ int size = 4 << s->gm_blocksize;
clean_addr = clean_data_tbi(s, addr);
tcg_gen_andi_i64(clean_addr, clean_addr, -size);
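In the translated-code path the same arithmetic happens at translate time: for the one blocksize currently configured (gm_blocksize = 6) the mask is -(4 << 6) = -256, clearing address bits [7:0]; the architectural minimum QEMU supports, 3, would give a 32-byte span. Trivial, but worth pinning down (sketch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t size = 4 << 6;                 /* gm_blocksize = 6 */
        assert(size == 256);
        assert((0x12f8 & -size) == 0x1200);    /* bits [7:0] cleared */
        assert((4 << 3) == 32);                /* smallest supported BS */
        return 0;
    }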
@@ -13896,6 +13896,7 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase,
dc->cp_regs = arm_cpu->cp_regs;
dc->features = env->features;
dc->dcz_blocksize = arm_cpu->dcz_blocksize;
+ dc->gm_blocksize = arm_cpu->gm_blocksize;
#ifdef CONFIG_USER_ONLY
/* In sve_probe_page, we assume TBI is enabled. */
diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h
index d1cacff..f748ba6 100644
--- a/target/arm/tcg/translate.h
+++ b/target/arm/tcg/translate.h
@@ -151,6 +151,8 @@ typedef struct DisasContext {
int8_t btype;
/* A copy of cpu->dcz_blocksize. */
uint8_t dcz_blocksize;
+ /* A copy of cpu->gm_blocksize. */
+ uint8_t gm_blocksize;
/* True if this page is guarded. */
bool guarded_page;
/* Bottom two bits of XScale c15_cpar coprocessor access control reg */