Diffstat (limited to 'target/arm/ptw.c')
-rw-r--r--  target/arm/ptw.c | 146
1 file changed, 143 insertions(+), 3 deletions(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 28caa7a..989e783 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -375,9 +375,8 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
return false;
}
-void get_phys_addr_pmsav7_default(CPUARMState *env,
- ARMMMUIdx mmu_idx,
- int32_t address, int *prot)
+static void get_phys_addr_pmsav7_default(CPUARMState *env, ARMMMUIdx mmu_idx,
+ int32_t address, int *prot)
{
if (!arm_feature(env, ARM_FEATURE_M)) {
*prot = PAGE_READ | PAGE_WRITE;
@@ -605,6 +604,147 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
return !(*prot & (1 << access_type));
}
+bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, bool *is_subpage,
+ ARMMMUFaultInfo *fi, uint32_t *mregion)
+{
+ /*
+ * Perform a PMSAv8 MPU lookup (without also doing the SAU check
+ * that a full phys-to-virt translation does).
+ * mregion is (if not NULL) set to the region number which matched,
+ * or -1 if no region number is returned (MPU off, address did not
+ * hit a region, address hit in multiple regions).
+ * We set is_subpage to true if the region hit doesn't cover the
+ * entire TARGET_PAGE the address is within.
+ */
+ ARMCPU *cpu = env_archcpu(env);
+ bool is_user = regime_is_user(env, mmu_idx);
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ int n;
+ int matchregion = -1;
+ bool hit = false;
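+ /*
+ * Bounds of the TARGET_PAGE containing the address; used below to
+ * detect MPU regions that cover only part of that page.
+ */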
+ uint32_t addr_page_base = address & TARGET_PAGE_MASK;
+ uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
+
+ *is_subpage = false;
+ *phys_ptr = address;
+ *prot = 0;
+ if (mregion) {
+ *mregion = -1;
+ }
+
+ /*
+ * Unlike the ARM ARM pseudocode, we don't need to check whether this
+ * was an exception vector read from the vector table (which is always
+ * done using the default system address map), because those accesses
+ * are done in arm_v7m_load_vector(), which always does a direct
+ * read using address_space_ldl(), rather than going via this function.
+ */
+ if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
+ hit = true;
+ } else if (m_is_ppb_region(env, address)) {
+ hit = true;
+ } else {
+ if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+ hit = true;
+ }
+
+ for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+ /* region search */
+ /*
+ * Note that the base address is bits [31:5] from the register
+ * with bits [4:0] all zeroes, but the limit address is bits
+ * [31:5] from the register with bits [4:0] all ones.
+ */
+ uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
+ uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
+
+ if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
+ /* Region disabled */
+ continue;
+ }
+
+ if (address < base || address > limit) {
+ /*
+ * Address not in this region. We must check whether the
+ * region covers addresses in the same page as our address.
+ * In that case we must not report a size that covers the
+ * whole page for a subsequent hit against a different MPU
+ * region or the background region, because it would result in
+ * incorrect TLB hits for subsequent accesses to addresses that
+ * are in this MPU region.
+ */
+ if (limit >= base &&
+ ranges_overlap(base, limit - base + 1,
+ addr_page_base,
+ TARGET_PAGE_SIZE)) {
+ *is_subpage = true;
+ }
+ continue;
+ }
+
+ if (base > addr_page_base || limit < addr_page_limit) {
+ *is_subpage = true;
+ }
+
+ if (matchregion != -1) {
+ /*
+ * Multiple regions match -- always a failure (unlike
+ * PMSAv7 where highest-numbered-region wins)
+ */
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return true;
+ }
+
+ matchregion = n;
+ hit = true;
+ }
+ }
+
+ if (!hit) {
+ /* background fault */
+ fi->type = ARMFault_Background;
+ return true;
+ }
+
+ if (matchregion == -1) {
+ /* hit using the background region */
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+ } else {
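+ /*
+ * In MPU_RBAR, bit [0] is XN and bits [2:1] are the AP field;
+ * in MPU_RLAR, bit [4] is PXN (v8.1-M only).
+ */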
+ uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
+ uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
+ bool pxn = false;
+
+ if (arm_feature(env, ARM_FEATURE_V8_1M)) {
+ pxn = extract32(env->pmsav8.rlar[secure][matchregion], 4, 1);
+ }
+
+ if (m_is_system_region(env, address)) {
+ /* System space is always execute never */
+ xn = 1;
+ }
+
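+ /*
+ * Grant execute permission only if the region grants some access,
+ * is not XN, and (for privileged accesses) is not PXN.
+ */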
+ *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
+ if (*prot && !xn && !(pxn && !is_user)) {
+ *prot |= PAGE_EXEC;
+ }
+ /*
+ * We don't need to look the attribute up in the MAIR0/MAIR1
+ * registers because that only tells us about cacheability.
+ */
+ if (mregion) {
+ *mregion = matchregion;
+ }
+ }
+
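+ /*
+ * Set up a level-1 permission fault in advance; the caller only
+ * consults it when we return true below, i.e. when the requested
+ * access type is not permitted.
+ */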
+ fi->type = ARMFault_Permission;
+ fi->level = 1;
+ return !(*prot & (1 << access_type));
+}
+
static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs,