Diffstat (limited to 'target/arm/ptw.c')
 target/arm/ptw.c | 608 ++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 458 insertions(+), 150 deletions(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 4330900..d4386ed 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -10,8 +10,10 @@
#include "qemu/log.h"
#include "qemu/range.h"
#include "qemu/main-loop.h"
-#include "exec/exec-all.h"
#include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
+#include "accel/tcg/probe.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-features.h"
@@ -34,8 +36,6 @@ typedef struct S1Translate {
/*
* in_space: the security space for this walk. This plus
* the in_mmu_idx specify the architectural translation regime.
- * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
- * this field is updated accordingly.
*
* Note that the security space for the in_ptw_idx may be different
* from that for the in_mmu_idx. We do not need to explicitly track
@@ -51,17 +51,36 @@ typedef struct S1Translate {
*/
ARMSecuritySpace in_space;
/*
+ * Like in_space, except this may be "downgraded" to NonSecure
+ * by an NSTable bit.
+ */
+ ARMSecuritySpace cur_space;
+ /*
* in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
* accesses will not update the guest page table access flags
* and will not change the state of the softmmu TLBs.
*/
bool in_debug;
/*
+ * in_at: is this AccessType_AT?
+ * This is also set for debug accesses, because at heart those are
+ * also address translations; setting the flag simplifies a test.
+ */
+ bool in_at;
+ /*
* If this is stage 2 of a stage 1+2 page table walk, then this must
* be true if stage 1 is an EL0 access; otherwise this is ignored.
* Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
*/
bool in_s1_is_el0;
+ /*
+ * The set of PAGE_* bits to be used in the permission check.
+ * This is normally directly related to the access_type, but
+ * may be suppressed for debug or AT insns.
+ */
+ uint8_t in_prot_check;
+ /* Cached EffectiveHCR_EL2_NVx() bit */
+ bool in_nv1;
bool out_rw;
bool out_be;
ARMSecuritySpace out_space;
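
[note: the new in_prot_check field turns the old single-bit test
!(prot & (1 << access_type)) into a mask check, applied uniformly
through the rest of the patch. A minimal sketch of the idea, not the
QEMU code itself:

    /*
     * Sketch: fault iff some required PAGE_* bit is missing from prot.
     * MMU_DATA_LOAD/MMU_DATA_STORE/MMU_INST_FETCH are 0/1/2, so the
     * old (1 << access_type) value lines up with PAGE_READ/PAGE_WRITE/
     * PAGE_EXEC (1/2/4); a mask of 0, as used for the debug walk,
     * suppresses the permission check entirely.
     */
    static bool permission_fault(unsigned prot_check, unsigned prot)
    {
        return (prot_check & ~prot) != 0;
    }
]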
@@ -120,7 +139,7 @@ unsigned int arm_pamax(ARMCPU *cpu)
{
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
unsigned int parange =
- FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+ FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE);
/*
* id_aa64mmfr0 is a read-only register so values outside of the
@@ -150,6 +169,10 @@ ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
return ARMMMUIdx_Stage1_E1;
case ARMMMUIdx_E10_1_PAN:
return ARMMMUIdx_Stage1_E1_PAN;
+ case ARMMMUIdx_E10_0_GCS:
+ return ARMMMUIdx_Stage1_E0_GCS;
+ case ARMMMUIdx_E10_1_GCS:
+ return ARMMMUIdx_Stage1_E1_GCS;
default:
return mmu_idx;
}
@@ -191,9 +214,9 @@ static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
return ARMMMUIdx_Phys_Realm;
case ARMSS_Secure:
if (stage2idx == ARMMMUIdx_Stage2_S) {
- s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
+ s2walk_secure = !(env->cp15.vstcr_el2 & R_VSTCR_SW_MASK);
} else {
- s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
+ s2walk_secure = !(env->cp15.vtcr_el2 & R_VTCR_NSW_MASK);
}
return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
default:
@@ -216,9 +239,9 @@ static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
return env->cp15.vsttbr_el2;
}
if (ttbrn == 0) {
- return env->cp15.ttbr0_el[regime_el(env, mmu_idx)];
+ return env->cp15.ttbr0_el[regime_el(mmu_idx)];
} else {
- return env->cp15.ttbr1_el[regime_el(env, mmu_idx)];
+ return env->cp15.ttbr1_el[regime_el(mmu_idx)];
}
}
@@ -257,8 +280,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_0_GCS:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E10_1_GCS:
/* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_TGE) {
@@ -267,8 +292,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
break;
case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E0_GCS:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_E1_GCS:
/* HCR.DC means SCTLR_EL1.M behaves as 0 */
hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
if (hcr_el2 & HCR_DC) {
@@ -277,10 +304,14 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
break;
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_0_GCS:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E20_2_GCS:
case ARMMMUIdx_E2:
+ case ARMMMUIdx_E2_GCS:
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E3_GCS:
case ARMMMUIdx_E30_0:
case ARMMMUIdx_E30_3_PAN:
break;
@@ -301,6 +332,7 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
ARMSecuritySpace pspace,
+ ARMSecuritySpace ss,
ARMMMUFaultInfo *fi)
{
MemTxAttrs attrs = {
@@ -330,7 +362,7 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
* physical address size is invalid.
*/
pps = FIELD_EX64(gpccr, GPCCR, PPS);
- if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
+ if (pps > FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE)) {
goto fault_walk;
}
pps = pamax_map[pps];
@@ -369,18 +401,37 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ);
/*
- * GPC Priority 2: Secure, Realm or Root address exceeds PPS.
+ * GPC Priority 2: Access to Secure, NonSecure or Realm is prevented
+ * by one of the GPCCR_EL3 address space disable bits (R_TCWMD).
+ * All of these bits are validated against aa64_rme_gpc2 in gpccr_write.
+ */
+ {
+ static const uint8_t disable_masks[4] = {
+ [ARMSS_Secure] = R_GPCCR_SPAD_MASK,
+ [ARMSS_NonSecure] = R_GPCCR_NSPAD_MASK,
+ [ARMSS_Root] = 0,
+ [ARMSS_Realm] = R_GPCCR_RLPAD_MASK,
+ };
+
+ if (gpccr & disable_masks[pspace]) {
+ goto fault_fail;
+ }
+ }
+
+ /*
+ * GPC Priority 3: Secure, Realm or Root address exceeds PPS.
* R_CPDSB: A NonSecure physical address input exceeding PPS
* does not experience any fault.
+ * R_PBPSH: Other address spaces have fault suppressed by APPSAA.
*/
if (paddress & ~pps_mask) {
- if (pspace == ARMSS_NonSecure) {
+ if (pspace == ARMSS_NonSecure || FIELD_EX64(gpccr, GPCCR, APPSAA)) {
return true;
}
- goto fault_size;
+ goto fault_fail;
}
- /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */
+ /* GPC Priority 4: the base address of GPTBR_EL3 exceeds PPS. */
tableaddr = env->cp15.gptbr_el3 << 12;
if (tableaddr & ~pps_mask) {
goto fault_size;
@@ -461,18 +512,30 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
break;
case 0b1111: /* all access */
return true;
- case 0b1000:
- case 0b1001:
- case 0b1010:
- case 0b1011:
+ case 0b1000: /* secure */
+ if (!cpu_isar_feature(aa64_sel2, cpu)) {
+ goto fault_walk;
+ }
+ /* fall through */
+ case 0b1001: /* non-secure */
+ case 0b1010: /* root */
+ case 0b1011: /* realm */
if (pspace == (gpi & 3)) {
return true;
}
break;
+ case 0b1101: /* non-secure only */
+ /* aa64_rme_gpc2 was checked in gpccr_write */
+ if (FIELD_EX64(gpccr, GPCCR, NSO)) {
+ return (pspace == ARMSS_NonSecure &&
+ (ss == ARMSS_NonSecure || ss == ARMSS_Root));
+ }
+ goto fault_walk;
default:
goto fault_walk; /* reserved */
}
+ fault_fail:
fi->gpcf = GPCF_Fail;
goto fault_common;
fault_eabt:
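
[note: for reference, the 0b10xx GPI values tested in the switch above
encode the target physical address space in their low two bits, in the
same order as the designated initializers of disable_masks earlier in
this function (Secure, NonSecure, Root, Realm). A self-contained sketch
of just that decode, with the reserved/NSO/walk special cases of the
real code folded into "no":

    /* Sketch: match a 4-bit GPT granule protection index. */
    static bool gpi_matches(unsigned gpi, unsigned pspace)
    {
        if (gpi == 0xf) {           /* all access */
            return true;
        }
        if ((gpi & 0xc) == 0x8) {   /* 0b10xx: one specific space */
            return pspace == (gpi & 3);
        }
        return false;               /* no access, reserved, NSO, ... */
    }
]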
@@ -573,12 +636,14 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
* From gdbstub, do not use softmmu so that we don't modify the
* state of the cpu at all, including softmmu tlb contents.
*/
- ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx);
+ ARMSecuritySpace s2_space
+ = S2_security_space(ptw->cur_space, s2_mmu_idx);
S1Translate s2ptw = {
.in_mmu_idx = s2_mmu_idx,
.in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
.in_space = s2_space,
.in_debug = true,
+ .in_prot_check = PAGE_READ,
};
GetPhysAddrResult s2 = { };
@@ -615,7 +680,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
}
if (regime_is_stage2(s2_mmu_idx)) {
- uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
+ uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->cur_space);
if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
/*
@@ -626,7 +691,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
fi->s2addr = addr;
fi->stage2 = true;
fi->s1ptw = true;
- fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
+ fi->s1ns = fault_s1ns(ptw->cur_space, s2_mmu_idx);
return false;
}
}
@@ -642,7 +707,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
fi->s2addr = addr;
fi->stage2 = regime_is_stage2(s2_mmu_idx);
fi->s1ptw = fi->stage2;
- fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
+ fi->s1ns = fault_s1ns(ptw->cur_space, s2_mmu_idx);
return false;
}
@@ -735,7 +800,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
uint64_t new_val, S1Translate *ptw,
ARMMMUFaultInfo *fi)
{
-#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
+#if defined(CONFIG_ATOMIC64) && defined(CONFIG_TCG)
uint64_t cur_val;
void *host = ptw->out_host;
@@ -829,7 +894,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
fi->s2addr = ptw->out_virt;
fi->stage2 = true;
fi->s1ptw = true;
- fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
+ fi->s1ns = fault_s1ns(ptw->cur_space, ptw->in_ptw_idx);
return 0;
}
@@ -947,7 +1012,7 @@ static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
int ap, int domain_prot)
{
return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot,
- regime_is_user(env, mmu_idx));
+ regime_is_user(mmu_idx));
}
/*
@@ -973,7 +1038,7 @@ static int simple_ap_to_rw_prot_is_user(int ap, bool is_user)
static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
{
- return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
+ return simple_ap_to_rw_prot_is_user(ap, regime_is_user(mmu_idx));
}
static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
@@ -1006,7 +1071,7 @@ static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
}
type = (desc & 3);
domain = (desc >> 5) & 0x0f;
- if (regime_el(env, ptw->in_mmu_idx) == 1) {
+ if (regime_el(ptw->in_mmu_idx) == 1) {
dacr = env->cp15.dacr_ns;
} else {
dacr = env->cp15.dacr_s;
@@ -1059,11 +1124,10 @@ static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
result->f.lg_page_size = 12;
break;
- case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
+ case 3: /* 1k page, or ARMv6 "extended small (4k) page" */
if (type == 1) {
- /* ARMv6/XScale extended small page format */
- if (arm_feature(env, ARM_FEATURE_XSCALE)
- || arm_feature(env, ARM_FEATURE_V6)) {
+ /* ARMv6 extended small page format */
+ if (arm_feature(env, ARM_FEATURE_V6)) {
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
result->f.lg_page_size = 12;
} else {
@@ -1087,7 +1151,7 @@ static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw,
}
result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot);
result->f.prot |= result->f.prot ? PAGE_EXEC : 0;
- if (!(result->f.prot & (1 << access_type))) {
+ if (ptw->in_prot_check & ~result->f.prot) {
/* Access permission fault. */
fi->type = ARMFault_Permission;
goto do_fault;
@@ -1146,7 +1210,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
/* Page or Section. */
domain = (desc >> 5) & 0x0f;
}
- if (regime_el(env, mmu_idx) == 1) {
+ if (regime_el(mmu_idx) == 1) {
dacr = env->cp15.dacr_ns;
} else {
dacr = env->cp15.dacr_s;
@@ -1210,7 +1274,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
g_assert_not_reached();
}
}
- out_space = ptw->in_space;
+ out_space = ptw->cur_space;
if (ns) {
/*
* The NS bit will (as required by the architecture) have no effect if
@@ -1240,8 +1304,8 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
}
result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw,
- xn, pxn, result->f.attrs.space, out_space);
- if (!(result->f.prot & (1 << access_type))) {
+ xn, pxn, ptw->in_space, out_space);
+ if (ptw->in_prot_check & ~result->f.prot) {
/* Access permission fault. */
fi->type = ARMFault_Permission;
goto do_fault;
@@ -1264,7 +1328,7 @@ do_fault:
* @xn: XN (execute-never) bits
* @s1_is_el0: true if this is S2 of an S1+2 walk for EL0
*/
-static int get_S2prot_noexecute(int s2ap)
+static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
{
int prot = 0;
@@ -1274,12 +1338,6 @@ static int get_S2prot_noexecute(int s2ap)
if (s2ap & 2) {
prot |= PAGE_WRITE;
}
- return prot;
-}
-
-static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
-{
- int prot = get_S2prot_noexecute(s2ap);
if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) {
switch (xn) {
@@ -1311,6 +1369,44 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
return prot;
}
+static int get_S2prot_indirect(CPUARMState *env, GetPhysAddrResult *result,
+ int pi_index, int po_index, bool s1_is_el0)
+{
+ /* Last index is (priv, unpriv, ttw) */
+ static const uint8_t perm_table[16][3] = {
+ /* 0 */ { 0, 0, 0 }, /* no access */
+ /* 1 */ { 0, 0, 0 }, /* reserved */
+ /* 2 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+ /* 3 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+ /* 4 */ { PAGE_WRITE, PAGE_WRITE, 0 },
+ /* 5 */ { 0, 0, 0 }, /* reserved */
+ /* 6 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+ /* 7 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE },
+ /* 8 */ { PAGE_READ, PAGE_READ, PAGE_READ },
+ /* 9 */ { PAGE_READ, PAGE_READ | PAGE_EXEC, PAGE_READ },
+ /* A */ { PAGE_READ | PAGE_EXEC, PAGE_READ, PAGE_READ },
+ /* B */ { PAGE_READ | PAGE_EXEC, PAGE_READ | PAGE_EXEC, PAGE_READ },
+ /* C */ { PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_WRITE },
+ /* D */ { PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE },
+ /* E */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE,
+ PAGE_READ | PAGE_WRITE },
+ /* F */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ PAGE_READ | PAGE_WRITE },
+ };
+
+ uint64_t pir = (env->cp15.scr_el3 & SCR_PIEN ? env->cp15.s2pir_el2 : 0);
+ int s2pi = extract64(pir, pi_index * 4, 4);
+
+ result->f.prot = perm_table[s2pi][2];
+ return perm_table[s2pi][s1_is_el0];
+}
+
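
[note: the stage 2 permission-indirection index is assembled from
descriptor bits 6, 51 and 54:53 before indexing S2PIR_EL2, mirroring
the extract64() calls added to get_phys_addr_lpae below. A short
sketch of that decode:

    /*
     * Sketch: build the 4-bit PIndex from an LPAE descriptor, then
     * pick the matching 4-bit permission field out of S2PIR_EL2.
     */
    static unsigned s2_perm_field(uint64_t desc, uint64_t s2pir)
    {
        unsigned pi = extract64(desc, 6, 1)
                    | (extract64(desc, 51, 1) << 1)
                    | (extract64(desc, 53, 2) << 2);
        return extract64(s2pir, pi * 4, 4);
    }
]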
/*
* Translate section/page access permissions to protection flags
* @env: CPUARMState
@@ -1328,7 +1424,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
{
ARMCPU *cpu = env_archcpu(env);
- bool is_user = regime_is_user(env, mmu_idx);
+ bool is_user = regime_is_user(mmu_idx);
bool have_wxn;
int wxn = 0;
@@ -1345,10 +1441,10 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
* We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0
* do not affect EPAN.
*/
- if (user_rw && regime_is_pan(env, mmu_idx)) {
+ if (user_rw && regime_is_pan(mmu_idx)) {
prot_rw = 0;
} else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 &&
- regime_is_pan(env, mmu_idx) &&
+ regime_is_pan(mmu_idx) &&
(regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
prot_rw = 0;
}
@@ -1405,7 +1501,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
xn = pxn || (user_rw & PAGE_WRITE);
}
} else if (arm_feature(env, ARM_FEATURE_V7)) {
- switch (regime_el(env, mmu_idx)) {
+ switch (regime_el(mmu_idx)) {
case 1:
case 3:
if (is_user) {
@@ -1432,11 +1528,115 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
return prot_rw | PAGE_EXEC;
}
+/* Extra page permission bits, used only within get_S1prot_indirect. */
+#define PAGE_GCS (1 << 3)
+#define PAGE_WXN (1 << 4)
+#define PAGE_OVERLAY (1 << 5)
+QEMU_BUILD_BUG_ON(PAGE_RWX & (PAGE_GCS | PAGE_WXN | PAGE_OVERLAY));
+
+static int get_S1prot_indirect(CPUARMState *env, S1Translate *ptw,
+ ARMMMUIdx mmu_idx, int pi_index, int po_index,
+ ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
+{
+ static const uint8_t perm_table[16] = {
+ /* 0 */ PAGE_OVERLAY, /* no access */
+ /* 1 */ PAGE_OVERLAY | PAGE_READ,
+ /* 2 */ PAGE_OVERLAY | PAGE_EXEC,
+ /* 3 */ PAGE_OVERLAY | PAGE_READ | PAGE_EXEC,
+ /* 4 */ PAGE_OVERLAY, /* reserved */
+ /* 5 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE,
+ /* 6 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_WXN,
+ /* 7 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ /* 8 */ PAGE_READ,
+ /* 9 */ PAGE_READ | PAGE_GCS,
+ /* A */ PAGE_READ | PAGE_EXEC,
+ /* B */ 0, /* reserved */
+ /* C */ PAGE_READ | PAGE_WRITE,
+ /* D */ 0, /* reserved */
+ /* E */ PAGE_READ | PAGE_WRITE | PAGE_EXEC,
+ /* F */ 0, /* reserved */
+ };
+
+ uint32_t el = regime_el(mmu_idx);
+ uint64_t pir = env->cp15.pir_el[el];
+ uint64_t pire0 = 0;
+ int perm;
+
+ if (el < 3) {
+ if (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_PIEN)) {
+ pir = 0;
+ } else if (el == 2) {
+ pire0 = env->cp15.pire0_el2;
+ } else if (!ptw->in_nv1) {
+ pire0 = env->cp15.pir_el[0];
+ }
+ }
+ perm = perm_table[extract64(pir, pi_index * 4, 4)];
+
+ if (regime_has_2_ranges(mmu_idx)) {
+ int p_perm = perm;
+ int u_perm = perm_table[extract64(pire0, pi_index * 4, 4)];
+
+ if ((p_perm & (PAGE_EXEC | PAGE_GCS)) &&
+ (u_perm & (PAGE_WRITE | PAGE_GCS))) {
+ p_perm &= ~(PAGE_RWX | PAGE_GCS);
+ u_perm &= ~(PAGE_RWX | PAGE_GCS);
+ }
+ if ((u_perm & (PAGE_RWX | PAGE_GCS)) && regime_is_pan(mmu_idx)) {
+ p_perm &= ~(PAGE_READ | PAGE_WRITE);
+ }
+ perm = regime_is_user(mmu_idx) ? u_perm : p_perm;
+ }
+
+ if (in_pa != out_pa) {
+ switch (in_pa) {
+ case ARMSS_Root:
+ /*
+ * R_ZWRVD: permission fault for insn fetched from non-Root,
+ * I_WWBFB: SIF has no effect in EL3.
+ */
+ perm &= ~(PAGE_EXEC | PAGE_GCS);
+ break;
+ case ARMSS_Realm:
+ /*
+ * R_PKTDS: permission fault for insn fetched from non-Realm,
+ * for Realm EL2 or EL2&0. The corresponding fault for EL1&0
+ * happens during any stage2 translation.
+ */
+ if (el == 2) {
+ perm &= ~(PAGE_EXEC | PAGE_GCS);
+ }
+ break;
+ case ARMSS_Secure:
+ if (env->cp15.scr_el3 & SCR_SIF) {
+ perm &= ~(PAGE_EXEC | PAGE_GCS);
+ }
+ break;
+ default:
+ /* Input NonSecure must have output NonSecure. */
+ g_assert_not_reached();
+ }
+ }
+
+ if (regime_is_gcs(mmu_idx)) {
+ /*
+ * Note that the one s1perms.gcs bit controls both read and write
+ * access via AccessType_GCS. See AArch64.S1CheckPermissions.
+ */
+ perm = (perm & PAGE_GCS ? PAGE_READ | PAGE_WRITE : 0);
+ } else if (perm & PAGE_WXN) {
+ perm &= ~PAGE_EXEC;
+ }
+
+ return perm & PAGE_RWX;
+}
+
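
[note: the post-processing at the end of get_S1prot_indirect is the
subtle part: the extra PAGE_GCS/PAGE_WXN/PAGE_OVERLAY bits never
escape the function. A condensed sketch of that final step, assuming
the patch's perm_table and the extra-bit macros above are in scope:

    /*
     * Sketch: one PIR_ELx nibble -> final architectural permissions.
     * For a GCS regime the single gcs bit expands to read+write;
     * otherwise WXN strips exec; only PAGE_RWX survives the return.
     */
    static int s1pie_final_perm(uint64_t pir, int pi_index, bool is_gcs)
    {
        int perm = perm_table[extract64(pir, pi_index * 4, 4)];

        if (is_gcs) {
            perm = (perm & PAGE_GCS) ? PAGE_READ | PAGE_WRITE : 0;
        } else if (perm & PAGE_WXN) {
            perm &= ~PAGE_EXEC;
        }
        return perm & PAGE_RWX;
    }
]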
static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
ARMMMUIdx mmu_idx)
{
uint64_t tcr = regime_tcr(env, mmu_idx);
- uint32_t el = regime_el(env, mmu_idx);
+ uint32_t el = regime_el(mmu_idx);
int select, tsz;
bool epd, hpd;
@@ -1457,8 +1657,12 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va,
}
tsz = sextract32(tcr, 0, 4) + 8;
select = 0;
- hpd = false;
epd = false;
+ /*
+ * Stage2 does not have hierarchical permissions, so treating
+ * them as always disabled simplifies the page table walk.
+ */
+ hpd = true;
} else if (el == 2) {
/* HTCR */
tsz = extract32(tcr, 0, 3);
@@ -1623,12 +1827,6 @@ static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
}
}
-static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
-{
- uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
- return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
-}
-
/**
* get_phys_addr_lpae: perform one stage of page table walk, LPAE format
*
@@ -1658,13 +1856,13 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
uint64_t ttbr;
hwaddr descaddr, indexmask, indexmask_grainsize;
uint32_t tableattrs;
- target_ulong page_size;
+ uint64_t page_size;
uint64_t attrs;
int32_t stride;
int addrsize, inputsize, outputsize;
uint64_t tcr = regime_tcr(env, mmu_idx);
- int ap, xn, pxn;
- uint32_t el = regime_el(env, mmu_idx);
+ int ap, prot;
+ uint32_t el = regime_el(mmu_idx);
uint64_t descaddrmask;
bool aarch64 = arm_el_is_aa64(env, el);
uint64_t descriptor, new_descriptor;
@@ -1681,6 +1879,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
level = 0;
/*
+ * Cache NV1 before we adjust ptw->in_space for NSTable.
+ * Note that this is only relevant for EL1&0, and that
+ * computing it would assert for ARMSS_Root.
+ */
+ if (el == 1) {
+ uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
+ ptw->in_nv1 = (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1);
+ }
+
+ /*
* If TxSZ is programmed to a value larger than the maximum,
* or smaller than the effective minimum, it is IMPLEMENTATION
* DEFINED whether we behave as if the field were programmed
@@ -1701,7 +1909,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* ID_AA64MMFR0 is a read-only register so values outside of the
* supported mappings can be considered an implementation error.
*/
- ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+ ps = FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE);
ps = MIN(ps, param.ps);
assert(ps < ARRAY_SIZE(pamax_map));
outputsize = pamax_map[ps];
@@ -1731,7 +1939,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* validation to do here.
*/
if (inputsize < addrsize) {
- target_ulong top_bits = sextract64(address, inputsize,
+ uint64_t top_bits = sextract64(address, inputsize,
addrsize - inputsize);
if (-top_bits != param.select) {
/* The gap between the two regions is a Translation fault */
@@ -1843,7 +2051,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* NonSecure. With RME, the EL3 translation regime does not change
* from Root to NonSecure.
*/
- if (ptw->in_space == ARMSS_Secure
+ if (ptw->cur_space == ARMSS_Secure
&& !regime_is_stage2(mmu_idx)
&& extract32(tableattrs, 4, 1)) {
/*
@@ -1853,7 +2061,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
ptw->in_ptw_idx += 1;
- ptw->in_space = ARMSS_NonSecure;
+ ptw->cur_space = ARMSS_NonSecure;
}
if (!S1_ptw_translate(env, ptw, descaddr, fi)) {
@@ -1920,7 +2128,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
descaddr &= ~(hwaddr)(page_size - 1);
descaddr |= (address & (page_size - 1));
- if (likely(!ptw->in_debug)) {
+ /*
+ * For AccessType_AT, DB is not updated (AArch64.SetDirtyFlag),
+ * and it is IMPLEMENTATION DEFINED whether AF is updated
+ * (AArch64.SetAccessFlag; qemu chooses not to update).
+ */
+ if (likely(!ptw->in_at)) {
/*
* Access flag.
* If HA is enabled, prepare to update the descriptor below.
@@ -1959,21 +2172,31 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
* except NSTable (which we have already handled).
*/
attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
- if (!regime_is_stage2(mmu_idx)) {
- if (!param.hpd) {
- attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
- /*
- * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
- * means "force PL1 access only", which means forcing AP[1] to 0.
- */
- attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
- attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */
- }
+ if (!param.hpd) {
+ attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
+ /*
+ * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
+ * means "force PL1 access only", which means forcing AP[1] to 0.
+ */
+ attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */
+ attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */
}
ap = extract32(attrs, 6, 2);
- out_space = ptw->in_space;
+ out_space = ptw->cur_space;
if (regime_is_stage2(mmu_idx)) {
+ if (param.pie) {
+ int pi = extract64(attrs, 6, 1)
+ | (extract64(attrs, 51, 1) << 1)
+ | (extract64(attrs, 53, 2) << 2);
+ int po = extract64(attrs, 60, 3);
+ prot = get_S2prot_indirect(env, result, pi, po, ptw->in_s1_is_el0);
+ } else {
+ int xn = extract64(attrs, 53, 2);
+ prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
+ /* Install TTW permissions in f.prot. */
+ result->f.prot = prot & (PAGE_READ | PAGE_WRITE);
+ }
/*
* R_GYNXY: For stage2 in Realm security state, bit 55 is NS.
* The bit remains ignored for other security states.
@@ -1982,11 +2205,9 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
*/
if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) {
out_space = ARMSS_NonSecure;
- result->f.prot = get_S2prot_noexecute(ap);
- } else {
- xn = extract64(attrs, 53, 2);
- result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
+ prot &= ~PAGE_EXEC;
}
+ result->s2prot = prot;
result->cacheattrs.is_s2_format = true;
result->cacheattrs.attrs = extract32(attrs, 2, 4);
@@ -2000,7 +2221,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
int nse, ns = extract32(attrs, 5, 1);
uint8_t attrindx;
uint64_t mair;
- int user_rw, prot_rw;
switch (out_space) {
case ARMSS_Root:
@@ -2049,37 +2269,57 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
default:
g_assert_not_reached();
}
- xn = extract64(attrs, 54, 1);
- pxn = extract64(attrs, 53, 1);
- if (el == 1 && nv_nv1_enabled(env, ptw)) {
+ if (param.pie) {
+ int pi = extract64(attrs, 6, 1)
+ | (extract64(attrs, 51, 1) << 1)
+ | (extract64(attrs, 53, 2) << 2);
+ int po = extract64(attrs, 60, 3);
/*
- * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page
- * descriptor bit 54 holds PXN, 53 is RES0, and the effective value
- * of UXN is 0. Similarly for bits 59 and 60 in table descriptors
- * (which we have already folded into bits 53 and 54 of attrs).
- * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
- * Similarly, APTable[0] from the table descriptor is treated as 0;
- * we already folded this into AP[1] and squashing that to 0 does
- * the right thing.
+ * Note that we modified ptw->in_space earlier for NSTable, but
+ * result->f.attrs retains a copy of the original security space.
*/
- pxn = xn;
- xn = 0;
- ap &= ~1;
- }
+ prot = get_S1prot_indirect(env, ptw, mmu_idx, pi, po,
+ result->f.attrs.space, out_space);
+ } else if (regime_is_gcs(mmu_idx)) {
+ /*
+ * While one must use indirect permissions to successfully
+ * use GCS instructions, AArch64.S1DirectBasePermissions
+ * faithfully supplies s1perms.gcs = 0, Just In Case.
+ */
+ prot = 0;
+ } else {
+ int xn = extract64(attrs, 54, 1);
+ int pxn = extract64(attrs, 53, 1);
+ int user_rw, prot_rw;
- user_rw = simple_ap_to_rw_prot_is_user(ap, true);
- prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
- /*
- * Note that we modified ptw->in_space earlier for NSTable, but
- * result->f.attrs retains a copy of the original security space.
- */
- result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
- xn, pxn, result->f.attrs.space, out_space);
+ if (el == 1 && ptw->in_nv1) {
+ /*
+ * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1},
+ * the block/page descriptor bit 54 holds PXN,
+ * 53 is RES0, and the effective value of UXN is 0.
+ * Similarly for bits 59 and 60 in table descriptors
+ * (which we have already folded into bits 53 and 54 of attrs).
+ * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0.
+ * Similarly, APTable[0] from the table descriptor is treated
+ * as 0; we already folded this into AP[1] and squashing
+ * that to 0 does the right thing.
+ */
+ pxn = xn;
+ xn = 0;
+ ap &= ~1;
+ }
+
+ user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+ prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
+ prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
+ xn, pxn, ptw->in_space, out_space);
+ }
+ result->f.prot = prot;
/* Index into MAIR registers for cache attributes */
attrindx = extract32(attrs, 2, 3);
- mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ mair = env->cp15.mair_el[regime_el(mmu_idx)];
assert(attrindx <= 7);
result->cacheattrs.is_s2_format = false;
result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
@@ -2121,11 +2361,27 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
result->f.tlb_fill_flags = 0;
}
- if (!(result->f.prot & (1 << access_type))) {
+ if (ptw->in_prot_check & ~prot) {
fi->type = ARMFault_Permission;
goto do_fault;
}
+ /* S1PIE and S2PIE both have a bit for software dirty page tracking. */
+ if (access_type == MMU_DATA_STORE && param.pie) {
+ /*
+ * For S1PIE, bit 7 is nDirty and both HA and HD are checked.
+ * For S2PIE, bit 7 is Dirty and only HD is checked.
+ */
+ bool bit7 = extract64(attrs, 7, 1);
+ if (regime_is_stage2(mmu_idx)
+ ? !bit7 && !param.hd
+ : bit7 && !(param.ha && param.hd)) {
+ fi->type = ARMFault_Permission;
+ fi->dirtybit = true;
+ goto do_fault;
+ }
+ }
+
/* If FEAT_HAFDBS has made changes, update the PTE. */
if (new_descriptor != descriptor) {
new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi);
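
[note: the two stage formats use opposite polarity for descriptor
bit 7, which is easy to misread. The predicate added above, isolated
as a sketch:

    /*
     * Sketch: does a store fault for software dirty tracking?
     * S1PIE: bit 7 is nDirty -- set means fault unless both HA and HD
     * allow the hardware update.  S2PIE: bit 7 is Dirty -- clear means
     * fault unless HD allows the update.
     */
    static bool pie_dirty_fault(bool stage2, bool bit7, bool ha, bool hd)
    {
        return stage2 ? !bit7 && !hd
                      : bit7 && !(ha && hd);
    }
]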
@@ -2173,7 +2429,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
fi->level = level;
fi->stage2 = regime_is_stage2(mmu_idx);
}
- fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
+ fi->s1ns = fault_s1ns(ptw->cur_space, mmu_idx);
return true;
}
@@ -2188,7 +2444,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env,
uint32_t mask;
uint32_t base;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
- bool is_user = regime_is_user(env, mmu_idx);
+ bool is_user = regime_is_user(mmu_idx);
if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
/* MPU disabled. */
@@ -2355,7 +2611,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env,
ARMCPU *cpu = env_archcpu(env);
int n;
ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
- bool is_user = regime_is_user(env, mmu_idx);
+ bool is_user = regime_is_user(mmu_idx);
bool secure = arm_space_is_secure(ptw->in_space);
result->f.phys_addr = address;
@@ -2535,13 +2791,13 @@ static bool get_phys_addr_pmsav7(CPUARMState *env,
fi->type = ARMFault_Permission;
fi->level = 1;
- return !(result->f.prot & (1 << access_type));
+ return (ptw->in_prot_check & ~result->f.prot) != 0;
}
static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
uint32_t secure)
{
- if (regime_el(env, mmu_idx) == 2) {
+ if (regime_el(mmu_idx) == 2) {
return env->pmsav8.hprbar;
} else {
return env->pmsav8.rbar[secure];
@@ -2551,7 +2807,7 @@ static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx,
static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
uint32_t secure)
{
- if (regime_el(env, mmu_idx) == 2) {
+ if (regime_el(mmu_idx) == 2) {
return env->pmsav8.hprlar;
} else {
return env->pmsav8.rlar[secure];
@@ -2559,8 +2815,9 @@ static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx,
}
bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- bool secure, GetPhysAddrResult *result,
+ MMUAccessType access_type, unsigned prot_check,
+ ARMMMUIdx mmu_idx, bool secure,
+ GetPhysAddrResult *result,
ARMMMUFaultInfo *fi, uint32_t *mregion)
{
/*
@@ -2574,7 +2831,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
* memory system to use a subpage.
*/
ARMCPU *cpu = env_archcpu(env);
- bool is_user = regime_is_user(env, mmu_idx);
+ bool is_user = regime_is_user(mmu_idx);
int n;
int matchregion = -1;
bool hit = false;
@@ -2582,7 +2839,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
int region_counter;
- if (regime_el(env, mmu_idx) == 2) {
+ if (regime_el(mmu_idx) == 2) {
region_counter = cpu->pmsav8r_hdregion;
} else {
region_counter = cpu->pmsav7_dregion;
@@ -2708,7 +2965,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
xn = 1;
}
- if (regime_el(env, mmu_idx) == 2) {
+ if (regime_el(mmu_idx) == 2) {
result->f.prot = simple_ap_to_rw_prot_is_user(ap,
mmu_idx != ARMMMUIdx_E2);
} else {
@@ -2717,7 +2974,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
if (!arm_feature(env, ARM_FEATURE_M)) {
uint8_t attrindx = extract32(matched_rlar, 1, 3);
- uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ uint64_t mair = env->cp15.mair_el[regime_el(mmu_idx)];
uint8_t sh = extract32(matched_rlar, 3, 2);
if (regime_sctlr(env, mmu_idx) & SCTLR_WXN &&
@@ -2725,7 +2982,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
xn = 0x1;
}
- if ((regime_el(env, mmu_idx) == 1) &&
+ if ((regime_el(mmu_idx) == 1) &&
regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) {
pxn = 0x1;
}
@@ -2748,7 +3005,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
if (arm_feature(env, ARM_FEATURE_M)) {
fi->level = 1;
}
- return !(result->f.prot & (1 << access_type));
+ return (prot_check & ~result->f.prot) != 0;
}
static bool v8m_is_sau_exempt(CPUARMState *env,
@@ -2950,8 +3207,8 @@ static bool get_phys_addr_pmsav8(CPUARMState *env,
}
}
- ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
- result, fi, NULL);
+ ret = pmsav8_mpu_lookup(env, address, access_type, ptw->in_prot_check,
+ mmu_idx, secure, result, fi, NULL);
if (sattrs.subpage) {
result->f.lg_page_size = 0;
}
@@ -3210,7 +3467,7 @@ static bool get_phys_addr_disabled(CPUARMState *env,
break;
default:
- r_el = regime_el(env, mmu_idx);
+ r_el = regime_el(mmu_idx);
if (arm_el_is_aa64(env, r_el)) {
int pamax = arm_pamax(env_archcpu(env));
uint64_t tcr = env->cp15.tcr_el[r_el];
@@ -3318,7 +3575,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
- result->f.prot &= s1_prot;
+ result->f.prot = s1_prot & result->s2prot;
/* If S2 fails, return early. */
if (ret) {
@@ -3370,9 +3627,9 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
*/
if (in_space == ARMSS_Secure) {
result->f.attrs.secure =
- !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW))
+ !(env->cp15.vstcr_el2 & (R_VSTCR_SA_MASK | R_VSTCR_SW_MASK))
&& (ipa_secure
- || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW)));
+ || !(env->cp15.vtcr_el2 & (R_VTCR_NSA_MASK | R_VTCR_NSW_MASK)));
result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure);
}
@@ -3393,6 +3650,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
* cannot upgrade a NonSecure translation regime's attributes
* to Secure or Realm.
*/
+ ptw->cur_space = ptw->in_space;
result->f.attrs.space = ptw->in_space;
result->f.attrs.secure = arm_space_is_secure(ptw->in_space);
@@ -3454,7 +3712,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
break;
}
- result->f.attrs.user = regime_is_user(env, mmu_idx);
+ result->f.attrs.user = regime_is_user(mmu_idx);
/*
* Fast Context Switch Extension. This doesn't exist at all in v8.
@@ -3462,7 +3720,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
*/
if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
&& !arm_feature(env, ARM_FEATURE_V8)) {
- if (regime_el(env, mmu_idx) == 3) {
+ if (regime_el(mmu_idx) == 3) {
address += env->cp15.fcseidr_s;
} else {
address += env->cp15.fcseidr_ns;
@@ -3528,47 +3786,58 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
return true;
}
if (!granule_protection_check(env, result->f.phys_addr,
- result->f.attrs.space, fi)) {
+ result->f.attrs.space, ptw->in_space, fi)) {
fi->type = ARMFault_GPCFOnOutput;
return true;
}
return false;
}
-bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
- MMUAccessType access_type, MemOp memop,
- ARMMMUIdx mmu_idx, ARMSecuritySpace space,
- GetPhysAddrResult *result,
- ARMMMUFaultInfo *fi)
+bool get_phys_addr_for_at(CPUARMState *env, vaddr address,
+ unsigned prot_check, ARMMMUIdx mmu_idx,
+ ARMSecuritySpace space, GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi)
{
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
.in_space = space,
+ .in_at = true,
+ .in_prot_check = prot_check,
};
- return get_phys_addr_nogpc(env, &ptw, address, access_type,
- memop, result, fi);
+ /*
+ * I_MXTJT: Granule protection checks are not performed on the final
+ * address of a successful translation. This is a translation not a
+ * memory reference, so MMU_DATA_LOAD is arbitrary (the exact protection
+ * check is handled or bypassed by .in_prot_check) and "memop = MO_8"
+ * bypasses any alignment check.
+ */
+ return get_phys_addr_nogpc(env, &ptw, address,
+ MMU_DATA_LOAD, MO_8, result, fi);
}
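
[note: a hypothetical call site, to show how the prot_check parameter
is meant to be driven; the variable names are illustrative, not from
this patch. An AT S1E1W-style translation asks only for write
permission, while the debug path (see arm_cpu_get_phys_page below)
passes 0 to bypass the permission check entirely:

    GetPhysAddrResult res = {};
    ARMMMUFaultInfo fi = {};

    /* Illustration only: translate va for a write at Stage1 EL1. */
    if (get_phys_addr_for_at(env, va, PAGE_WRITE, ARMMMUIdx_Stage1_E1,
                             ARMSS_NonSecure, &res, &fi)) {
        /* fault: report fi via the PAR_EL1 encoding, etc. */
    }
]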
-bool get_phys_addr(CPUARMState *env, vaddr address,
- MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
- GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+static ARMSecuritySpace
+arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
{
- S1Translate ptw = {
- .in_mmu_idx = mmu_idx,
- };
ARMSecuritySpace ss;
switch (mmu_idx) {
case ARMMMUIdx_E10_0:
+ case ARMMMUIdx_E10_0_GCS:
case ARMMMUIdx_E10_1:
case ARMMMUIdx_E10_1_PAN:
+ case ARMMMUIdx_E10_1_GCS:
case ARMMMUIdx_E20_0:
+ case ARMMMUIdx_E20_0_GCS:
case ARMMMUIdx_E20_2:
case ARMMMUIdx_E20_2_PAN:
+ case ARMMMUIdx_E20_2_GCS:
case ARMMMUIdx_Stage1_E0:
+ case ARMMMUIdx_Stage1_E0_GCS:
case ARMMMUIdx_Stage1_E1:
case ARMMMUIdx_Stage1_E1_PAN:
+ case ARMMMUIdx_Stage1_E1_GCS:
case ARMMMUIdx_E2:
+ case ARMMMUIdx_E2_GCS:
ss = arm_security_space_below_el3(env);
break;
case ARMMMUIdx_Stage2:
@@ -3597,6 +3866,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
ss = ARMSS_Secure;
break;
case ARMMMUIdx_E3:
+ case ARMMMUIdx_E3_GCS:
case ARMMMUIdx_E30_0:
case ARMMMUIdx_E30_3_PAN:
if (arm_feature(env, ARM_FEATURE_AARCH64) &&
@@ -3616,28 +3886,36 @@ bool get_phys_addr(CPUARMState *env, vaddr address,
g_assert_not_reached();
}
- ptw.in_space = ss;
+ return ss;
+}
+
+bool get_phys_addr(CPUARMState *env, vaddr address,
+ MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
+ GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+{
+ S1Translate ptw = {
+ .in_mmu_idx = mmu_idx,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
+ .in_prot_check = 1 << access_type,
+ };
+
return get_phys_addr_gpc(env, &ptw, address, access_type,
memop, result, fi);
}
-hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
- MemTxAttrs *attrs)
+static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr,
+ MemTxAttrs *attrs, ARMMMUIdx mmu_idx)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- ARMMMUIdx mmu_idx = arm_mmu_idx(env);
- ARMSecuritySpace ss = arm_security_space(env);
S1Translate ptw = {
.in_mmu_idx = mmu_idx,
- .in_space = ss,
+ .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
.in_debug = true,
+ .in_at = true,
+ .in_prot_check = 0,
};
GetPhysAddrResult res = {};
ARMMMUFaultInfo fi = {};
- bool ret;
-
- ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
+ bool ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
*attrs = res.f.attrs;
if (ret) {
@@ -3645,3 +3923,33 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
}
return res.f.phys_addr;
}
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+ MemTxAttrs *attrs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+
+ hwaddr res = arm_cpu_get_phys_page(env, addr, attrs, mmu_idx);
+
+ if (res != -1) {
+ return res;
+ }
+
+ /*
+ * Memory may be accessible for an "unprivileged load/store" variant.
+ * In this case, get_a64_user_mem_index() generates an op using an
+ * unprivileged mmu idx, so we need to try that as well.
+ */
+ switch (mmu_idx) {
+ case ARMMMUIdx_E10_1:
+ case ARMMMUIdx_E10_1_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E10_0);
+ case ARMMMUIdx_E20_2:
+ case ARMMMUIdx_E20_2_PAN:
+ return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E20_0);
+ default:
+ return -1;
+ }
+}