author     Peter Maydell <peter.maydell@linaro.org>    2022-10-03 17:23:13 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2022-10-10 14:52:25 +0100
commit     104f703d93c9f12984a165985af653f83527c84e
tree       9996b833143bf69ce234b5a8e25c205452e32753 /target
parent     0ff993193fe759b735e382fbe06b8258b537f95d
target/arm: Don't allow guest to use unimplemented granule sizes
Arm CPUs support some subset of the granule (page) sizes 4K, 16K and
64K. The guest selects the one it wants using bits in the TCR_ELx
registers. If it tries to program these registers with a value that is
either reserved or which requests a size that the CPU does not
implement, the architecture requires that the CPU behaves as if the
field was programmed to some size that has been implemented.
Currently we don't implement this, and instead let the guest use any
granule size, even if the CPU ID register fields say it isn't present.

Make aa64_va_parameters() check against the supported granule size and
force use of a different one if it is not implemented. (A subsequent
commit will make ARMVAParameters use the new enum rather than the
current pair of using16k/using64k bools.)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20221003162315.2833797-2-peter.maydell@linaro.org
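For context (not part of this patch): a well-behaved bare-metal AArch64
guest is expected to read ID_AA64MMFR0_EL1 and only program a TCR_EL1.TG0
value the CPU implements. The sketch below illustrates that guest-side
check; the helper names and the fallback policy are illustrative
assumptions, not QEMU or kernel code. The field positions follow the Arm
ARM (TG0 at TCR_EL1[15:14], TGRAN16 at ID_AA64MMFR0_EL1[23:20]).

    /*
     * Illustrative guest-side sketch (assumed names, compile for AArch64):
     * pick a 16K granule only if ID_AA64MMFR0_EL1.TGRAN16 reports it as
     * implemented, otherwise fall back to 4K.
     */
    #include <stdint.h>

    #define TCR_TG0_4K   0ULL                    /* TG0 encoding: 0 = 4K  */
    #define TCR_TG0_16K  2ULL                    /* TG0 encoding: 2 = 16K */

    static inline uint64_t read_id_aa64mmfr0(void)
    {
        uint64_t v;
        __asm__ volatile("mrs %0, id_aa64mmfr0_el1" : "=r"(v));
        return v;
    }

    static void select_granule(void)
    {
        uint64_t mmfr0 = read_id_aa64mmfr0();
        unsigned tgran16 = (mmfr0 >> 20) & 0xf;  /* TGRAN16, bits [23:20] */
        uint64_t tg0 = (tgran16 >= 1) ? TCR_TG0_16K : TCR_TG0_4K;
        uint64_t tcr;

        __asm__ volatile("mrs %0, tcr_el1" : "=r"(tcr));
        tcr = (tcr & ~(3ULL << 14)) | (tg0 << 14);  /* TG0 is bits [15:14] */
        __asm__ volatile("msr tcr_el1, %0" : : "r"(tcr));
        __asm__ volatile("isb" ::: "memory");
    }

The patch handles the case where a guest skips such a check: the new
sanitize_gran_size() in the diff below maps an unimplemented or reserved
selection onto the smallest granule the CPU does implement, which is an
IMPDEF choice the architecture allows.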
Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu.h         33
-rw-r--r--  target/arm/helper.c     102
-rw-r--r--  target/arm/internals.h    9
3 files changed, 136 insertions(+), 8 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index d541392..1a909a1 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -4097,6 +4097,39 @@ static inline bool isar_feature_aa64_tgran16_2_lpa2(const ARMISARegisters *id)
    return t >= 3 || (t == 0 && isar_feature_aa64_tgran16_lpa2(id));
}

+static inline bool isar_feature_aa64_tgran4(const ARMISARegisters *id)
+{
+    return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4) >= 0;
+}
+
+static inline bool isar_feature_aa64_tgran16(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16) >= 1;
+}
+
+static inline bool isar_feature_aa64_tgran64(const ARMISARegisters *id)
+{
+    return FIELD_SEX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64) >= 0;
+}
+
+static inline bool isar_feature_aa64_tgran4_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN4_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran4(id));
+}
+
+static inline bool isar_feature_aa64_tgran16_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN16_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran16(id));
+}
+
+static inline bool isar_feature_aa64_tgran64_2(const ARMISARegisters *id)
+{
+    unsigned t = FIELD_EX64(id->id_aa64mmfr0, ID_AA64MMFR0, TGRAN64_2);
+    return t >= 2 || (t == 0 && isar_feature_aa64_tgran64(id));
+}
+
static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id)
{
    return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index e1338ed..d7f578f 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -10287,20 +10287,105 @@ static int aa64_va_parameter_tcma(uint64_t tcr, ARMMMUIdx mmu_idx)
    }
}

+static ARMGranuleSize tg0_to_gran_size(int tg)
+{
+    switch (tg) {
+    case 0:
+        return Gran4K;
+    case 1:
+        return Gran64K;
+    case 2:
+        return Gran16K;
+    default:
+        return GranInvalid;
+    }
+}
+
+static ARMGranuleSize tg1_to_gran_size(int tg)
+{
+    switch (tg) {
+    case 1:
+        return Gran16K;
+    case 2:
+        return Gran4K;
+    case 3:
+        return Gran64K;
+    default:
+        return GranInvalid;
+    }
+}
+
+static inline bool have4k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran4_2, cpu)
+        : cpu_isar_feature(aa64_tgran4, cpu);
+}
+
+static inline bool have16k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran16_2, cpu)
+        : cpu_isar_feature(aa64_tgran16, cpu);
+}
+
+static inline bool have64k(ARMCPU *cpu, bool stage2)
+{
+    return stage2 ? cpu_isar_feature(aa64_tgran64_2, cpu)
+        : cpu_isar_feature(aa64_tgran64, cpu);
+}
+
+static ARMGranuleSize sanitize_gran_size(ARMCPU *cpu, ARMGranuleSize gran,
+                                         bool stage2)
+{
+    switch (gran) {
+    case Gran4K:
+        if (have4k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case Gran16K:
+        if (have16k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case Gran64K:
+        if (have64k(cpu, stage2)) {
+            return gran;
+        }
+        break;
+    case GranInvalid:
+        break;
+    }
+    /*
+     * If the guest selects a granule size that isn't implemented,
+     * the architecture requires that we behave as if it selected one
+     * that is (with an IMPDEF choice of which one to pick). We choose
+     * to implement the smallest supported granule size.
+     */
+    if (have4k(cpu, stage2)) {
+        return Gran4K;
+    }
+    if (have16k(cpu, stage2)) {
+        return Gran16K;
+    }
+    assert(have64k(cpu, stage2));
+    return Gran64K;
+}
+
ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
                                   ARMMMUIdx mmu_idx, bool data)
{
    uint64_t tcr = regime_tcr(env, mmu_idx);
    bool epd, hpd, using16k, using64k, tsz_oob, ds;
    int select, tsz, tbi, max_tsz, min_tsz, ps, sh;
+    ARMGranuleSize gran;
    ARMCPU *cpu = env_archcpu(env);
+    bool stage2 = mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S;

    if (!regime_has_2_ranges(mmu_idx)) {
        select = 0;
        tsz = extract32(tcr, 0, 6);
-        using64k = extract32(tcr, 14, 1);
-        using16k = extract32(tcr, 15, 1);
-        if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
+        gran = tg0_to_gran_size(extract32(tcr, 14, 2));
+        if (stage2) {
            /* VTCR_EL2 */
            hpd = false;
        } else {
@@ -10318,16 +10403,13 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
        select = extract64(va, 55, 1);
        if (!select) {
            tsz = extract32(tcr, 0, 6);
+            gran = tg0_to_gran_size(extract32(tcr, 14, 2));
            epd = extract32(tcr, 7, 1);
            sh = extract32(tcr, 12, 2);
-            using64k = extract32(tcr, 14, 1);
-            using16k = extract32(tcr, 15, 1);
            hpd = extract64(tcr, 41, 1);
        } else {
-            int tg = extract32(tcr, 30, 2);
-            using16k = tg == 1;
-            using64k = tg == 3;
            tsz = extract32(tcr, 16, 6);
+            gran = tg1_to_gran_size(extract32(tcr, 30, 2));
            epd = extract32(tcr, 23, 1);
            sh = extract32(tcr, 28, 2);
            hpd = extract64(tcr, 42, 1);
@@ -10336,6 +10418,10 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va,
        ds = extract64(tcr, 59, 1);
    }

+    gran = sanitize_gran_size(cpu, gran, stage2);
+    using64k = gran == Gran64K;
+    using16k = gran == Gran16K;
+
    if (cpu_isar_feature(aa64_st, cpu)) {
        max_tsz = 48 - using64k;
    } else {
diff --git a/target/arm/internals.h b/target/arm/internals.h
index fd17aee..6166ac0 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -998,6 +998,15 @@ static inline uint32_t aarch64_pstate_valid_mask(const ARMISARegisters *id)
    return valid;
}

+/* Granule size (i.e. page size) */
+typedef enum ARMGranuleSize {
+    /* Same order as TG0 encoding */
+    Gran4K,
+    Gran64K,
+    Gran16K,
+    GranInvalid,
+} ARMGranuleSize;
+
/*
 * Parameters of a given virtual address, as extracted from the
 * translation control register (TCR) for a given regime.