From 11552bb0d95484c99233ec2f09c25261885d08ed Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Wed, 8 Jun 2022 19:38:51 +0100
Subject: target/arm: Move arm_{ldl,ldq}_ptw to ptw.c

Move the ptw load functions, plus 3 common subroutines:
S1_ptw_translate, ptw_attrs_are_device, and
regime_translation_big_endian.  This also allows
get_phys_addr_lpae to become static again.

Signed-off-by: Richard Henderson
Message-id: 20220604040607.269301-17-richard.henderson@linaro.org
Reviewed-by: Peter Maydell
Signed-off-by: Peter Maydell
---
 target/arm/ptw.c | 160 ++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 154 insertions(+), 6 deletions(-)

(limited to 'target/arm/ptw.c')

diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index cbccf91..e4b860d 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -15,6 +15,154 @@
 #include "ptw.h"
 
 
+static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool s1_is_el0, hwaddr *phys_ptr,
+                               MemTxAttrs *txattrs, int *prot,
+                               target_ulong *page_size_ptr,
+                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+    __attribute__((nonnull));
+
+static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
+{
+    return (regime_sctlr(env, mmu_idx) & SCTLR_EE) != 0;
+}
+
+static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
+{
+    /*
+     * For an S1 page table walk, the stage 1 attributes are always
+     * some form of "this is Normal memory". The combined S1+S2
+     * attributes are therefore only Device if stage 2 specifies Device.
+     * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
+     * ie when cacheattrs.attrs bits [3:2] are 0b00.
+     * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
+     * when cacheattrs.attrs bit [2] is 0.
+     */
+    assert(cacheattrs.is_s2_format);
+    if (arm_hcr_el2_eff(env) & HCR_FWB) {
+        return (cacheattrs.attrs & 0x4) == 0;
+    } else {
+        return (cacheattrs.attrs & 0xc) == 0;
+    }
+}
+
+/* Translate a S1 pagetable walk through S2 if needed.  */
+static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
+                               hwaddr addr, bool *is_secure,
+                               ARMMMUFaultInfo *fi)
+{
+    if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
+        !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
+        target_ulong s2size;
+        hwaddr s2pa;
+        int s2prot;
+        int ret;
+        ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
+                                          : ARMMMUIdx_Stage2;
+        ARMCacheAttrs cacheattrs = {};
+        MemTxAttrs txattrs = {};
+
+        ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
+                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
+                                 &cacheattrs);
+        if (ret) {
+            assert(fi->type != ARMFault_None);
+            fi->s2addr = addr;
+            fi->stage2 = true;
+            fi->s1ptw = true;
+            fi->s1ns = !*is_secure;
+            return ~0;
+        }
+        if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
+            ptw_attrs_are_device(env, cacheattrs)) {
+            /*
+             * PTW set and S1 walk touched S2 Device memory:
+             * generate Permission fault.
+             */
+            fi->type = ARMFault_Permission;
+            fi->s2addr = addr;
+            fi->stage2 = true;
+            fi->s1ptw = true;
+            fi->s1ns = !*is_secure;
+            return ~0;
+        }
+
+        if (arm_is_secure_below_el3(env)) {
+            /* Check if page table walk is to secure or non-secure PA space. */
+            if (*is_secure) {
+                *is_secure = !(env->cp15.vstcr_el2.raw_tcr & VSTCR_SW);
+            } else {
+                *is_secure = !(env->cp15.vtcr_el2.raw_tcr & VTCR_NSW);
+            }
+        } else {
+            assert(!*is_secure);
+        }
+
+        addr = s2pa;
+    }
+    return addr;
+}
+
+/* All loads done in the course of a page table walk go through here.  */
+static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult result = MEMTX_OK;
+    AddressSpace *as;
+    uint32_t data;
+
+    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
+    attrs.secure = is_secure;
+    as = arm_addressspace(cs, attrs);
+    if (fi->s1ptw) {
+        return 0;
+    }
+    if (regime_translation_big_endian(env, mmu_idx)) {
+        data = address_space_ldl_be(as, addr, attrs, &result);
+    } else {
+        data = address_space_ldl_le(as, addr, attrs, &result);
+    }
+    if (result == MEMTX_OK) {
+        return data;
+    }
+    fi->type = ARMFault_SyncExternalOnWalk;
+    fi->ea = arm_extabort_type(result);
+    return 0;
+}
+
+static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
+                            ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    MemTxAttrs attrs = {};
+    MemTxResult result = MEMTX_OK;
+    AddressSpace *as;
+    uint64_t data;
+
+    addr = S1_ptw_translate(env, mmu_idx, addr, &is_secure, fi);
+    attrs.secure = is_secure;
+    as = arm_addressspace(cs, attrs);
+    if (fi->s1ptw) {
+        return 0;
+    }
+    if (regime_translation_big_endian(env, mmu_idx)) {
+        data = address_space_ldq_be(as, addr, attrs, &result);
+    } else {
+        data = address_space_ldq_le(as, addr, attrs, &result);
+    }
+    if (result == MEMTX_OK) {
+        return data;
+    }
+    fi->type = ARMFault_SyncExternalOnWalk;
+    fi->ea = arm_extabort_type(result);
+    return 0;
+}
+
 static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
                                      uint32_t *table, uint32_t address)
 {
@@ -338,12 +486,12 @@ do_fault:
  * @fi: set to fault info if the translation fails
  * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  */
-bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
-                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                        bool s1_is_el0,
-                        hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
-                        target_ulong *page_size_ptr,
-                        ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
+                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                               bool s1_is_el0, hwaddr *phys_ptr,
+                               MemTxAttrs *txattrs, int *prot,
+                               target_ulong *page_size_ptr,
+                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
 {
     ARMCPU *cpu = env_archcpu(env);
     CPUState *cs = CPU(cpu);
-- 
cgit v1.1
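
A note on the ptw_attrs_are_device() check moved in this patch: the stage 2
descriptor's memory-type bits end up in cacheattrs.attrs, and whether they
mean Device depends on HCR_EL2.FWB, as the in-code comment states (FWB == 0:
Device when bits [3:2] are 0b00; FWB == 1: Device when bit [2] is 0). The
standalone sketch below is not part of the commit; the is_device() helper and
the sample attrs values are purely illustrative, used only to show which
encodings each of the two bit tests classifies as Device.

    /* Standalone illustration, not QEMU code: mirrors the two bit tests. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool is_device(uint8_t attrs, bool fwb)
    {
        /* FWB == 1: Device iff attrs[2] == 0; FWB == 0: Device iff attrs[3:2] == 0b00. */
        return fwb ? (attrs & 0x4) == 0 : (attrs & 0xc) == 0;
    }

    int main(void)
    {
        /* A few hypothetical stage 2 attribute encodings. */
        const uint8_t samples[] = { 0x0, 0x1, 0x4, 0xc, 0xf };

        for (size_t i = 0; i < sizeof(samples); i++) {
            printf("attrs=0x%x: FWB=0 -> %s, FWB=1 -> %s\n", samples[i],
                   is_device(samples[i], false) ? "Device" : "Normal",
                   is_device(samples[i], true)  ? "Device" : "Normal");
        }
        return 0;
    }

Running this shows, for example, that attrs == 0x1 is treated as Device under
both encodings, while attrs == 0x4 and above-0x4 Normal encodings are not,
which is the case S1_ptw_translate() turns into a Permission fault when
HCR_EL2.PTW is set.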