author    Richard Henderson <richard.henderson@linaro.org>    2022-10-01 09:22:54 -0700
committer Peter Maydell <peter.maydell@linaro.org>            2022-10-10 14:52:25 +0100
commit    448e42fdc1013b3497c9a6902f8052488fc8af1a (patch)
tree      73f950f36dba6b82a31492b0ed63c9a8846fd853
parent    2189c79858a9eadd85851afc2369d8679ecd563a (diff)
target/arm: Split out get_phys_addr_disabled
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20221001162318.153420-19-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
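For reference, below is a minimal standalone sketch (not part of the patch) of the address-size check that the new helper performs when stage-1 translation is disabled on an AArch64 regime: with TBI in effect only bits [55:PAMax] of the virtual address must be zero, otherwise bits [63:PAMax]. The extract_bits() and address_size_fault() names are illustrative helpers, not QEMU APIs.

/*
 * Illustrative only -- extract_bits() and address_size_fault() are
 * hypothetical helpers, not QEMU functions.
 */
#include <stdbool.h>
#include <stdint.h>

/* Return 'len' bits of 'value' starting at bit 'start' (like QEMU's extract64). */
static uint64_t extract_bits(uint64_t value, int start, int len)
{
    return (value >> start) & (~0ULL >> (64 - len));
}

/*
 * With the MMU off, an AArch64 VA still faults if any bit above the
 * implemented physical address range is set.  TBI makes bits [63:56]
 * ignorable, so the checked range shrinks to [55:pamax].
 */
static bool address_size_fault(uint64_t address, int pamax, bool tbi)
{
    int addrtop = tbi ? 55 : 63;
    return extract_bits(address, pamax, addrtop - pamax + 1) != 0;
}

/*
 * Example: with pamax = 48 and TBI enabled, 0xff00000012345678 passes
 * (only the top byte is set), while 0x0001000012345678 faults (bit 48 set).
 */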
-rw-r--r--   target/arm/ptw.c   138
1 file changed, 74 insertions(+), 64 deletions(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index a589cec..96ab99c 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -2272,6 +2272,78 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
return ret;
}
+/*
+ * MMU disabled. S1 addresses within aa64 translation regimes are
+ * still checked for bounds -- see AArch64.S1DisabledOutput().
+ */
+static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
+ MMUAccessType access_type,
+ ARMMMUIdx mmu_idx, bool is_secure,
+ GetPhysAddrResult *result,
+ ARMMMUFaultInfo *fi)
+{
+ uint64_t hcr;
+ uint8_t memattr;
+
+ if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
+ int r_el = regime_el(env, mmu_idx);
+ if (arm_el_is_aa64(env, r_el)) {
+ int pamax = arm_pamax(env_archcpu(env));
+ uint64_t tcr = env->cp15.tcr_el[r_el];
+ int addrtop, tbi;
+
+ tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
+ if (access_type == MMU_INST_FETCH) {
+ tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
+ }
+ tbi = (tbi >> extract64(address, 55, 1)) & 1;
+ addrtop = (tbi ? 55 : 63);
+
+ if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
+ fi->type = ARMFault_AddressSize;
+ fi->level = 0;
+ fi->stage2 = false;
+ return 1;
+ }
+
+ /*
+ * When TBI is disabled, we've just validated that all of the
+ * bits above PAMax are zero, so logically we only need to
+ * clear the top byte for TBI. But it's clearer to follow
+ * the pseudocode set of addrdesc.paddress.
+ */
+ address = extract64(address, 0, 52);
+ }
+ }
+
+ result->phys = address;
+ result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ result->page_size = TARGET_PAGE_SIZE;
+
+ /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
+ hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+ result->cacheattrs.shareability = 0;
+ result->cacheattrs.is_s2_format = false;
+ if (hcr & HCR_DC) {
+ if (hcr & HCR_DCT) {
+ memattr = 0xf0; /* Tagged, Normal, WB, RWA */
+ } else {
+ memattr = 0xff; /* Normal, WB, RWA */
+ }
+ } else if (access_type == MMU_INST_FETCH) {
+ if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+ memattr = 0xee; /* Normal, WT, RA, NT */
+ } else {
+ memattr = 0x44; /* Normal, NC, No */
+ }
+ result->cacheattrs.shareability = 2; /* outer sharable */
+ } else {
+ memattr = 0x00; /* Device, nGnRnE */
+ }
+ result->cacheattrs.attrs = memattr;
+ return 0;
+}
+
bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
bool is_secure, GetPhysAddrResult *result,
@@ -2431,71 +2503,9 @@ bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
/* Definitely a real MMU, not an MPU */
if (regime_translation_disabled(env, mmu_idx, is_secure)) {
- uint64_t hcr;
- uint8_t memattr;
-
- /*
- * MMU disabled. S1 addresses within aa64 translation regimes are
- * still checked for bounds -- see AArch64.TranslateAddressS1Off.
- */
- if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
- int r_el = regime_el(env, mmu_idx);
- if (arm_el_is_aa64(env, r_el)) {
- int pamax = arm_pamax(env_archcpu(env));
- uint64_t tcr = env->cp15.tcr_el[r_el];
- int addrtop, tbi;
-
- tbi = aa64_va_parameter_tbi(tcr, mmu_idx);
- if (access_type == MMU_INST_FETCH) {
- tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx);
- }
- tbi = (tbi >> extract64(address, 55, 1)) & 1;
- addrtop = (tbi ? 55 : 63);
-
- if (extract64(address, pamax, addrtop - pamax + 1) != 0) {
- fi->type = ARMFault_AddressSize;
- fi->level = 0;
- fi->stage2 = false;
- return 1;
- }
-
- /*
- * When TBI is disabled, we've just validated that all of the
- * bits above PAMax are zero, so logically we only need to
- * clear the top byte for TBI. But it's clearer to follow
- * the pseudocode set of addrdesc.paddress.
- */
- address = extract64(address, 0, 52);
- }
- }
- result->phys = address;
- result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- result->page_size = TARGET_PAGE_SIZE;
-
- /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
- hcr = arm_hcr_el2_eff_secstate(env, is_secure);
- result->cacheattrs.shareability = 0;
- result->cacheattrs.is_s2_format = false;
- if (hcr & HCR_DC) {
- if (hcr & HCR_DCT) {
- memattr = 0xf0; /* Tagged, Normal, WB, RWA */
- } else {
- memattr = 0xff; /* Normal, WB, RWA */
- }
- } else if (access_type == MMU_INST_FETCH) {
- if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
- memattr = 0xee; /* Normal, WT, RA, NT */
- } else {
- memattr = 0x44; /* Normal, NC, No */
- }
- result->cacheattrs.shareability = 2; /* outer sharable */
- } else {
- memattr = 0x00; /* Device, nGnRnE */
- }
- result->cacheattrs.attrs = memattr;
- return 0;
+ return get_phys_addr_disabled(env, address, access_type, mmu_idx,
+ is_secure, result, fi);
}
-
if (regime_using_lpae_format(env, mmu_idx)) {
return get_phys_addr_lpae(env, address, access_type, mmu_idx,
is_secure, false, result, fi);