Diffstat (limited to 'target/arm/ptw.c')
-rw-r--r--   target/arm/ptw.c   357
1 file changed, 193 insertions(+), 164 deletions(-)
diff --git a/target/arm/ptw.c b/target/arm/ptw.c
index 4476b32..561bf26 100644
--- a/target/arm/ptw.c
+++ b/target/arm/ptw.c
@@ -10,15 +10,14 @@
 #include "qemu/log.h"
 #include "qemu/range.h"
 #include "qemu/main-loop.h"
-#include "exec/exec-all.h"
 #include "exec/page-protection.h"
+#include "exec/target_page.h"
+#include "exec/tlb-flags.h"
+#include "accel/tcg/probe.h"
 #include "cpu.h"
 #include "internals.h"
 #include "cpu-features.h"
 #include "idau.h"
-#ifdef CONFIG_TCG
-# include "tcg/oversized-guest.h"
-#endif
 
 typedef struct S1Translate {
     /*
@@ -74,17 +73,21 @@ typedef struct S1Translate {
 } S1Translate;
 
 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
-                                target_ulong address,
-                                MMUAccessType access_type,
+                                vaddr address,
+                                MMUAccessType access_type, MemOp memop,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi);
 
 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
-                              target_ulong address,
-                              MMUAccessType access_type,
+                              vaddr address,
+                              MMUAccessType access_type, MemOp memop,
                               GetPhysAddrResult *result,
                               ARMMMUFaultInfo *fi);
 
+static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
+                      int user_rw, int prot_rw, int xn, int pxn,
+                      ARMSecuritySpace in_pa, ARMSecuritySpace out_pa);
+
 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
 static const uint8_t pamax_map[] = {
     [0] = 32,
@@ -96,6 +99,21 @@ static const uint8_t pamax_map[] = {
     [6] = 52,
 };
 
+uint8_t round_down_to_parange_index(uint8_t bit_size)
+{
+    for (int i = ARRAY_SIZE(pamax_map) - 1; i >= 0; i--) {
+        if (pamax_map[i] <= bit_size) {
+            return i;
+        }
+    }
+    g_assert_not_reached();
+}
+
+uint8_t round_down_to_parange_bit_size(uint8_t bit_size)
+{
+    return pamax_map[round_down_to_parange_index(bit_size)];
+}
+
 /*
  * The cpu-specific constant value of PAMax; also used by hw/arm/virt.
  * Note that machvirt_init calls this on a CPU that is inited but not realized!
@@ -104,7 +122,7 @@ unsigned int arm_pamax(ARMCPU *cpu)
 {
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
         unsigned int parange =
-            FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+            FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE);
 
         /*
          * id_aa64mmfr0 is a read-only register so values outside of the
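The round_down_to_parange_index() helper added above selects the largest PARANGE encoding whose width does not exceed the requested bit size. A standalone sketch of that behavior (not part of the patch; plain C stands in for QEMU's ARRAY_SIZE and g_assert_not_reached):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static const uint8_t pamax_map[] = { 32, 36, 40, 42, 44, 48, 52 };

    /* Largest PARANGE index whose width fits in bit_size (illustrative copy). */
    static int round_down_to_parange_index(uint8_t bit_size)
    {
        for (int i = sizeof(pamax_map) / sizeof(pamax_map[0]) - 1; i >= 0; i--) {
            if (pamax_map[i] <= bit_size) {
                return i;
            }
        }
        assert(!"bit_size narrower than the narrowest PARANGE encoding");
        return -1;
    }

    int main(void)
    {
        /* 50 is not a valid PARANGE width: rounds down to index 5, i.e. 48 bits. */
        int idx = round_down_to_parange_index(50);
        printf("50 -> index %d, %d bits\n", idx, pamax_map[idx]);
        return 0;
    }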
@@ -265,6 +283,8 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
     case ARMMMUIdx_E20_2_PAN:
     case ARMMMUIdx_E2:
     case ARMMMUIdx_E3:
+    case ARMMMUIdx_E30_0:
+    case ARMMMUIdx_E30_3_PAN:
         break;
 
     case ARMMMUIdx_Phys_S:
@@ -312,7 +332,7 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress,
      * physical address size is invalid.
      */
    pps = FIELD_EX64(gpccr, GPCCR, PPS);
-    if (pps > FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE)) {
+    if (pps > FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE)) {
         goto fault_walk;
     }
     pps = pamax_map[pps];
@@ -564,7 +584,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         };
         GetPhysAddrResult s2 = { };
 
-        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, &s2, fi)) {
+        if (get_phys_addr_gpc(env, &s2ptw, addr, MMU_DATA_LOAD, 0, &s2, fi)) {
             goto fail;
         }
@@ -717,7 +737,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
                              uint64_t new_val, S1Translate *ptw,
                              ARMMMUFaultInfo *fi)
 {
-#if defined(TARGET_AARCH64) && defined(CONFIG_TCG)
+#if defined(CONFIG_ATOMIC64) && defined(CONFIG_TCG)
     uint64_t cur_val;
     void *host = ptw->out_host;
@@ -819,7 +839,6 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
         ptw->out_rw = true;
     }
 
-#ifdef CONFIG_ATOMIC64
     if (ptw->out_be) {
         old_val = cpu_to_be64(old_val);
         new_val = cpu_to_be64(new_val);
@@ -831,36 +850,6 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
         cur_val = qatomic_cmpxchg__nocheck((uint64_t *)host, old_val, new_val);
         cur_val = le64_to_cpu(cur_val);
     }
-#else
-    /*
-     * We can't support the full 64-bit atomic cmpxchg on the host.
-     * Because this is only used for FEAT_HAFDBS, which is only for AA64,
-     * we know that TCG_OVERSIZED_GUEST is set, which means that we are
-     * running in round-robin mode and could only race with dma i/o.
-     */
-#if !TCG_OVERSIZED_GUEST
-# error "Unexpected configuration"
-#endif
-    bool locked = bql_locked();
-    if (!locked) {
-        bql_lock();
-    }
-    if (ptw->out_be) {
-        cur_val = ldq_be_p(host);
-        if (cur_val == old_val) {
-            stq_be_p(host, new_val);
-        }
-    } else {
-        cur_val = ldq_le_p(host);
-        if (cur_val == old_val) {
-            stq_le_p(host, new_val);
-        }
-    }
-    if (!locked) {
-        bql_unlock();
-    }
-#endif
-
     return cur_val;
 #else
     /* AArch32 does not have FEAT_HADFS; non-TCG guests only use debug-mode. */
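With the oversized-guest fallback deleted, every TCG configuration now reaches the same 64-bit cmpxchg, byte-swapping the descriptor when the walk is big-endian (ptw->out_be). The swap/compare/swap-back pattern, reduced to standard C11 atomics for illustration (qatomic_cmpxchg__nocheck and cpu_to_be64 are the QEMU helpers; __builtin_bswap64 stands in here, assuming a little-endian host):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Descriptor held in guest (big-endian) order; host assumed little-endian. */
    static _Atomic uint64_t descriptor;

    static uint64_t casq_be(uint64_t old_val, uint64_t new_val)
    {
        /* Swap the compare and replacement values into storage order... */
        uint64_t expected = __builtin_bswap64(old_val);
        uint64_t desired  = __builtin_bswap64(new_val);

        atomic_compare_exchange_strong(&descriptor, &expected, desired);
        /* ...and swap whatever was actually there back into host order. */
        return __builtin_bswap64(expected);
    }

    int main(void)
    {
        atomic_store(&descriptor, __builtin_bswap64(0x1234));
        /* Matches: the store happens and the old value comes back. */
        printf("%#llx\n", (unsigned long long)casq_be(0x1234, 0x5678));
        return 0;
    }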
@@ -1131,7 +1120,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
     hwaddr phys_addr;
     uint32_t dacr;
     bool ns;
-    int user_prot;
+    ARMSecuritySpace out_space;
 
     /* Pagetable walk. */
     /* Lookup l1 descriptor. */
@@ -1223,16 +1212,19 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
             g_assert_not_reached();
         }
     }
+    out_space = ptw->in_space;
+    if (ns) {
+        /*
+         * The NS bit will (as required by the architecture) have no effect if
+         * the CPU doesn't support TZ or this is a non-secure translation
+         * regime, because the output space will already be non-secure.
+         */
+        out_space = ARMSS_NonSecure;
+    }
     if (domain_prot == 3) {
         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
     } else {
-        if (pxn && !regime_is_user(env, mmu_idx)) {
-            xn = 1;
-        }
-        if (xn && access_type == MMU_INST_FETCH) {
-            fi->type = ARMFault_Permission;
-            goto do_fault;
-        }
+        int user_rw, prot_rw;
 
         if (arm_feature(env, ARM_FEATURE_V6K) &&
                 (regime_sctlr(env, mmu_idx) & SCTLR_AFE)) {
@@ -1242,37 +1234,23 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw,
                 fi->type = ARMFault_AccessFlag;
                 goto do_fault;
             }
-            result->f.prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
-            user_prot = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
+            prot_rw = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
+            user_rw = simple_ap_to_rw_prot_is_user(ap >> 1, 1);
         } else {
-            result->f.prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
-            user_prot = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
-        }
-        if (result->f.prot && !xn) {
-            result->f.prot |= PAGE_EXEC;
+            prot_rw = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+            user_rw = ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, 1);
         }
+
+        result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw,
+                                    xn, pxn, result->f.attrs.space, out_space);
         if (!(result->f.prot & (1 << access_type))) {
             /* Access permission fault. */
             fi->type = ARMFault_Permission;
             goto do_fault;
         }
-        if (regime_is_pan(env, mmu_idx) &&
-            !regime_is_user(env, mmu_idx) &&
-            user_prot &&
-            access_type != MMU_INST_FETCH) {
-            /* Privileged Access Never fault */
-            fi->type = ARMFault_Permission;
-            goto do_fault;
-        }
-    }
-    if (ns) {
-        /* The NS bit will (as required by the architecture) have no effect if
-         * the CPU doesn't support TZ or this is a non-secure translation
-         * regime, because the attribute will already be non-secure.
-         */
-        result->f.attrs.secure = false;
-        result->f.attrs.space = ARMSS_NonSecure;
     }
+    result->f.attrs.space = out_space;
+    result->f.attrs.secure = arm_space_is_secure(out_space);
     result->f.phys_addr = phys_addr;
     return false;
 do_fault:
@@ -1340,25 +1318,24 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
  * @env: CPUARMState
  * @mmu_idx: MMU index indicating required translation regime
  * @is_aa64: TRUE if AArch64
- * @ap: The 2-bit simple AP (AP[2:1])
+ * @user_rw: Translated AP for user access
+ * @prot_rw: Translated AP for privileged access
  * @xn: XN (execute-never) bit
  * @pxn: PXN (privileged execute-never) bit
  * @in_pa: The original input pa space
 * @out_pa: The output pa space, modified by NSTable, NS, and NSE
 */
 static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
-                      int ap, int xn, int pxn,
+                      int user_rw, int prot_rw, int xn, int pxn,
                       ARMSecuritySpace in_pa, ARMSecuritySpace out_pa)
 {
     ARMCPU *cpu = env_archcpu(env);
     bool is_user = regime_is_user(env, mmu_idx);
-    int prot_rw, user_rw;
     bool have_wxn;
     int wxn = 0;
 
     assert(!regime_is_stage2(mmu_idx));
 
-    user_rw = simple_ap_to_rw_prot_is_user(ap, true);
     if (is_user) {
         prot_rw = user_rw;
     } else {
@@ -1376,8 +1353,6 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
             regime_is_pan(env, mmu_idx) &&
             (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) {
             prot_rw = 0;
-        } else {
-            prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
         }
     }
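get_S1prot() now takes the user and privileged rights pre-decoded (user_rw/prot_rw) instead of a raw two-bit AP field, so the v6 short-descriptor path above and the LPAE path below can share it. For reference, the simplified-AP decode that simple_ap_to_rw_prot_is_user() performs is essentially the following sketch (the PAGE_* constants are illustrative stand-ins for QEMU's):

    #include <stdio.h>

    #define PAGE_READ  1   /* illustrative stand-ins for QEMU's PAGE_* bits */
    #define PAGE_WRITE 2

    /* Simplified access permissions (SCTLR.AFE == 1): AP[2] selects read-only,
     * AP[1] selects user access; mirrors simple_ap_to_rw_prot_is_user(). */
    static int simple_ap_to_rw(int ap, int is_user)
    {
        switch (ap & 3) {
        case 0:  return is_user ? 0 : PAGE_READ | PAGE_WRITE; /* priv RW only */
        case 1:  return PAGE_READ | PAGE_WRITE;               /* RW everywhere */
        case 2:  return is_user ? 0 : PAGE_READ;              /* priv RO only  */
        default: return PAGE_READ;                            /* RO everywhere */
        }
    }

    int main(void)
    {
        for (int ap = 0; ap < 4; ap++) {
            printf("AP[2:1]=%d  user_rw=%d  prot_rw=%d\n", ap,
                   simple_ap_to_rw(ap, 1), simple_ap_to_rw(ap, 0));
        }
        return 0;
    }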
@@ -1669,12 +1644,13 @@ static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw)
  * @ptw: Current and next stage parameters for the walk.
 * @address: virtual address to get physical address for
 * @access_type: MMU_DATA_LOAD, MMU_DATA_STORE or MMU_INST_FETCH
+ * @memop: memory operation feeding this access, or 0 for none
 * @result: set on translation success,
 * @fi: set to fault info if the translation fails
 */
 static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
                                uint64_t address,
-                               MMUAccessType access_type,
+                               MMUAccessType access_type, MemOp memop,
                                GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
@@ -1684,7 +1660,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     uint64_t ttbr;
     hwaddr descaddr, indexmask, indexmask_grainsize;
     uint32_t tableattrs;
-    target_ulong page_size;
+    uint64_t page_size;
     uint64_t attrs;
     int32_t stride;
     int addrsize, inputsize, outputsize;
@@ -1727,7 +1703,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
      * ID_AA64MMFR0 is a read-only register so values outside of the
      * supported mappings can be considered an implementation error.
      */
-    ps = FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE);
+    ps = FIELD_EX64_IDREG(&cpu->isar, ID_AA64MMFR0, PARANGE);
     ps = MIN(ps, param.ps);
     assert(ps < ARRAY_SIZE(pamax_map));
     outputsize = pamax_map[ps];
@@ -1757,7 +1733,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
      * validation to do here.
      */
     if (inputsize < addrsize) {
-        target_ulong top_bits = sextract64(address, inputsize,
+        uint64_t top_bits = sextract64(address, inputsize,
                                            addrsize - inputsize);
         if (-top_bits != param.select) {
             /* The gap between the two regions is a Translation fault */
@@ -2013,8 +1989,21 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
             xn = extract64(attrs, 53, 2);
             result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0);
         }
+
+        result->cacheattrs.is_s2_format = true;
+        result->cacheattrs.attrs = extract32(attrs, 2, 4);
+        /*
+         * Security state does not really affect HCR_EL2.FWB;
+         * we only need to filter FWB for aa32 or other FEAT.
+         */
+        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
+                                     result->cacheattrs.attrs);
     } else {
         int nse, ns = extract32(attrs, 5, 1);
+        uint8_t attrindx;
+        uint64_t mair;
+        int user_rw, prot_rw;
+
         switch (out_space) {
         case ARMSS_Root:
             /*
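In the stage-1 branch (continued in the next hunk) the cache attributes come from one byte of MAIR_ELx selected by the descriptor's 3-bit AttrIndx, while stage 2 above takes its 4-bit attrs field straight from the descriptor. A toy illustration of that MAIR byte indexing, with a hand-built extract64() and a made-up MAIR value:

    #include <stdint.h>
    #include <stdio.h>

    /* Hand-built equivalent of QEMU's extract64(value, start, length), length < 64. */
    static uint64_t extract64(uint64_t value, int start, int length)
    {
        return (value >> start) & (~0ULL >> (64 - length));
    }

    int main(void)
    {
        /* Made-up MAIR_EL1: Attr0 = 0xff (Normal write-back), Attr1 = 0x04 (Device). */
        uint64_t mair = 0x04ff;

        for (int attrindx = 0; attrindx < 2; attrindx++) {
            /* Each AttrIndx names one byte of MAIR. */
            printf("Attr%d = 0x%02x\n", attrindx,
                   (unsigned)extract64(mair, attrindx * 8, 8));
        }
        return 0;
    }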
@@ -2080,12 +2069,58 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
             xn = 0;
             ap &= ~1;
         }
+
+        user_rw = simple_ap_to_rw_prot_is_user(ap, true);
+        prot_rw = simple_ap_to_rw_prot_is_user(ap, false);
         /*
          * Note that we modified ptw->in_space earlier for NSTable, but
          * result->f.attrs retains a copy of the original security space.
          */
-        result->f.prot = get_S1prot(env, mmu_idx, aarch64, ap, xn, pxn,
-                                    result->f.attrs.space, out_space);
+        result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw,
+                                    xn, pxn, result->f.attrs.space, out_space);
+
+        /* Index into MAIR registers for cache attributes */
+        attrindx = extract32(attrs, 2, 3);
+        mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+        assert(attrindx <= 7);
+        result->cacheattrs.is_s2_format = false;
+        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
+
+        /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
+        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
+            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
+        }
+        device = S1_attrs_are_device(result->cacheattrs.attrs);
+    }
+
+    /*
+     * Enable alignment checks on Device memory.
+     *
+     * Per R_XCHFJ, the correct ordering for alignment, permission,
+     * and stage 2 faults is:
+     *  - Alignment fault caused by the memory type
+     *  - Permission fault
+     *  - A stage 2 fault on the memory access
+     * Perform the alignment check now, so that we recognize it in
+     * the correct order.  Set TLB_CHECK_ALIGNED so that any subsequent
+     * softmmu tlb hit will also check the alignment; clear along the
+     * non-device path so that tlb_fill_flags is consistent in the
+     * event of restart_atomic_update.
+     *
+     * In v7, for a CPU without the Virtualization Extensions this
+     * access is UNPREDICTABLE; we choose to make it take the alignment
+     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
+     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
+     */
+    if (device) {
+        unsigned a_bits = memop_atomicity_bits(memop);
+        if (address & ((1 << a_bits) - 1)) {
+            fi->type = ARMFault_Alignment;
+            goto do_fault;
+        }
+        result->f.tlb_fill_flags = TLB_CHECK_ALIGNED;
+    } else {
+        result->f.tlb_fill_flags = 0;
     }
 
     if (!(result->f.prot & (1 << access_type))) {
@@ -2115,51 +2150,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     result->f.attrs.space = out_space;
     result->f.attrs.secure = arm_space_is_secure(out_space);
 
-    if (regime_is_stage2(mmu_idx)) {
-        result->cacheattrs.is_s2_format = true;
-        result->cacheattrs.attrs = extract32(attrs, 2, 4);
-        /*
-         * Security state does not really affect HCR_EL2.FWB;
-         * we only need to filter FWB for aa32 or other FEAT.
-         */
-        device = S2_attrs_are_device(arm_hcr_el2_eff(env),
-                                     result->cacheattrs.attrs);
-    } else {
-        /* Index into MAIR registers for cache attributes */
-        uint8_t attrindx = extract32(attrs, 2, 3);
-        uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
-        assert(attrindx <= 7);
-        result->cacheattrs.is_s2_format = false;
-        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
-
-        /* When in aarch64 mode, and BTI is enabled, remember GP in the TLB. */
-        if (aarch64 && cpu_isar_feature(aa64_bti, cpu)) {
-            result->f.extra.arm.guarded = extract64(attrs, 50, 1); /* GP */
-        }
-        device = S1_attrs_are_device(result->cacheattrs.attrs);
-    }
-
-    /*
-     * Enable alignment checks on Device memory.
-     *
-     * Per R_XCHFJ, this check is mis-ordered. The correct ordering
-     * for alignment, permission, and stage 2 faults should be:
-     *  - Alignment fault caused by the memory type
-     *  - Permission fault
-     *  - A stage 2 fault on the memory access
-     * but due to the way the TCG softmmu TLB operates, we will have
-     * implicitly done the permission check and the stage2 lookup in
-     * finding the TLB entry, so the alignment check cannot be done sooner.
-     *
-     * In v7, for a CPU without the Virtualization Extensions this
-     * access is UNPREDICTABLE; we choose to make it take the alignment
-     * fault as is required for a v7VE CPU. (QEMU doesn't emulate any
-     * CPUs with ARM_FEATURE_LPAE but not ARM_FEATURE_V7VE anyway.)
-     */
-    if (device) {
-        result->f.tlb_fill_flags |= TLB_CHECK_ALIGNED;
-    }
-
     /*
      * For FEAT_LPA2 and effective DS, the SH field in the attributes
     * was re-purposed for output address bits.  The SH attribute in
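The relocated check means a Device access that violates its required atomicity alignment now faults during the walk, before the permission test, matching the R_XCHFJ ordering; TLB_CHECK_ALIGNED then keeps later TLB hits honest. The address test itself is a plain power-of-two mask, sketched here with the bit count supplied directly rather than derived from a real MemOp via memop_atomicity_bits():

    #include <stdint.h>
    #include <stdio.h>

    /* Is `address` aligned to 2^a_bits bytes? (a_bits as from memop_atomicity_bits) */
    static int aligned_for_atomicity(uint64_t address, unsigned a_bits)
    {
        return (address & ((UINT64_C(1) << a_bits) - 1)) == 0;
    }

    int main(void)
    {
        /* An 8-byte single-copy-atomic access requires a_bits == 3. */
        printf("%d\n", aligned_for_atomicity(0x1000, 3)); /* 1: aligned, no fault */
        printf("%d\n", aligned_for_atomicity(0x1004, 3)); /* 0: ARMFault_Alignment */
        /* memop == 0 (e.g. a page-table walk itself) gives a_bits == 0: never faults. */
        printf("%d\n", aligned_for_atomicity(0x1003, 0)); /* 1 */
        return 0;
    }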
@@ -3202,7 +3192,7 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
  */
 static bool get_phys_addr_disabled(CPUARMState *env,
                                    S1Translate *ptw,
-                                   target_ulong address,
+                                   vaddr address,
                                    MMUAccessType access_type,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
@@ -3285,8 +3275,8 @@
 }
 
 static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
-                                   target_ulong address,
-                                   MMUAccessType access_type,
+                                   vaddr address,
+                                   MMUAccessType access_type, MemOp memop,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
 {
@@ -3298,7 +3288,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     ARMSecuritySpace ipa_space;
     uint64_t hcr;
 
-    ret = get_phys_addr_nogpc(env, ptw, address, access_type, result, fi);
+    ret = get_phys_addr_nogpc(env, ptw, address, access_type,
+                              memop, result, fi);
 
     /* If S1 fails, return early. */
     if (ret) {
@@ -3324,7 +3315,8 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     cacheattrs1 = result->cacheattrs;
     memset(result, 0, sizeof(*result));
 
-    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type, result, fi);
+    ret = get_phys_addr_nogpc(env, ptw, ipa, access_type,
+                              memop, result, fi);
     fi->s2addr = ipa;
 
     /* Combine the S1 and S2 perms. */
@@ -3390,8 +3382,8 @@
 }
 
 static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
-                                target_ulong address,
-                                MMUAccessType access_type,
+                                vaddr address,
+                                MMUAccessType access_type, MemOp memop,
                                 GetPhysAddrResult *result,
                                 ARMMMUFaultInfo *fi)
 {
@@ -3454,7 +3446,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
         if (arm_feature(env, ARM_FEATURE_EL2) &&
             !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
             return get_phys_addr_twostage(env, ptw, address, access_type,
-                                          result, fi);
+                                          memop, result, fi);
         }
         /* fall through */
@@ -3517,7 +3509,8 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
-        return get_phys_addr_lpae(env, ptw, address, access_type, result, fi);
+        return get_phys_addr_lpae(env, ptw, address, access_type,
+                                  memop, result, fi);
     } else if (arm_feature(env, ARM_FEATURE_V7) ||
                regime_sctlr(env, mmu_idx) & SCTLR_XP) {
         return get_phys_addr_v6(env, ptw, address, access_type, result, fi);
@@ -3527,12 +3520,13 @@
 }
 
 static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
-                              target_ulong address,
-                              MMUAccessType access_type,
+                              vaddr address,
+                              MMUAccessType access_type, MemOp memop,
                               GetPhysAddrResult *result,
                               ARMMMUFaultInfo *fi)
 {
-    if (get_phys_addr_nogpc(env, ptw, address, access_type, result, fi)) {
+    if (get_phys_addr_nogpc(env, ptw, address, access_type,
+                            memop, result, fi)) {
         return true;
     }
     if (!granule_protection_check(env, result->f.phys_addr,
@@ -3543,8 +3537,8 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
     return false;
 }
 
-bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
-                                    MMUAccessType access_type,
+bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address,
+                                    MMUAccessType access_type, MemOp memop,
                                     ARMMMUIdx mmu_idx, ARMSecuritySpace space,
                                     GetPhysAddrResult *result,
                                     ARMMMUFaultInfo *fi)
@@ -3553,16 +3547,13 @@ bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
         .in_mmu_idx = mmu_idx,
         .in_space = space,
     };
-    return get_phys_addr_nogpc(env, &ptw, address,
-                               access_type, result, fi);
+    return get_phys_addr_nogpc(env, &ptw, address, access_type,
+                               memop, result, fi);
 }
 
-bool get_phys_addr(CPUARMState *env, target_ulong address,
-                   MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
+static ARMSecuritySpace
+arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx)
 {
-    S1Translate ptw = {
-        .in_mmu_idx = mmu_idx,
-    };
     ARMSecuritySpace ss;
 
     switch (mmu_idx) {
@@ -3604,6 +3595,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         ss = ARMSS_Secure;
         break;
     case ARMMMUIdx_E3:
+    case ARMMMUIdx_E30_0:
+    case ARMMMUIdx_E30_3_PAN:
         if (arm_feature(env, ARM_FEATURE_AARCH64) &&
             cpu_isar_feature(aa64_rme, env_archcpu(env))) {
             ss = ARMSS_Root;
@@ -3621,27 +3614,33 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
         g_assert_not_reached();
     }
 
-    ptw.in_space = ss;
-    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
+    return ss;
 }
 
-hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
-                                         MemTxAttrs *attrs)
+bool get_phys_addr(CPUARMState *env, vaddr address,
+                   MMUAccessType access_type, MemOp memop, ARMMMUIdx mmu_idx,
+                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
-    ARMSecuritySpace ss = arm_security_space(env);
     S1Translate ptw = {
         .in_mmu_idx = mmu_idx,
-        .in_space = ss,
+        .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
+    };
+
+    return get_phys_addr_gpc(env, &ptw, address, access_type,
+                             memop, result, fi);
+}
+
+static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr,
+                                    MemTxAttrs *attrs, ARMMMUIdx mmu_idx)
+{
+    S1Translate ptw = {
+        .in_mmu_idx = mmu_idx,
+        .in_space = arm_mmu_idx_to_security_space(env, mmu_idx),
         .in_debug = true,
     };
     GetPhysAddrResult res = {};
     ARMMMUFaultInfo fi = {};
-    bool ret;
-
-    ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, &res, &fi);
+    bool ret = get_phys_addr_gpc(env, &ptw, addr, MMU_DATA_LOAD, 0, &res, &fi);
 
     *attrs = res.f.attrs;
     if (ret) {
@@ -3649,3 +3648,33 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     }
     return res.f.phys_addr;
 }
+
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+                                         MemTxAttrs *attrs)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+
+    hwaddr res = arm_cpu_get_phys_page(env, addr, attrs, mmu_idx);
+
+    if (res != -1) {
+        return res;
+    }
+
+    /*
+     * Memory may be accessible for an "unprivileged load/store" variant.
+     * In this case, get_a64_user_mem_index function generates an op using an
+     * unprivileged mmu idx, so we need to try with it.
+     */
+    switch (mmu_idx) {
+    case ARMMMUIdx_E10_1:
+    case ARMMMUIdx_E10_1_PAN:
+        return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E10_0);
+    case ARMMMUIdx_E20_2:
+    case ARMMMUIdx_E20_2_PAN:
+        return arm_cpu_get_phys_page(env, addr, attrs, ARMMMUIdx_E20_0);
+    default:
+        return -1;
+    }
+}
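The new debug fallback retries a failed lookup with the unprivileged MMU index of the same regime, so that gdbstub-style inspection can still resolve pages reachable only via LDTR/STTR-type accesses. The retry shape, reduced to a stub translator (the enum and translate() below are illustrative only):

    #include <stdio.h>

    typedef enum { E10_0, E10_1, E10_1_PAN, E20_0, E20_2, E20_2_PAN } MMUIdx;

    /* Stand-in for arm_cpu_get_phys_page(): pretend only the unprivileged
     * regimes map this address. */
    static long translate(unsigned long addr, MMUIdx idx)
    {
        return (idx == E10_0 || idx == E20_0) ? (long)(addr & ~0xfffUL) : -1;
    }

    static long translate_for_debug(unsigned long addr, MMUIdx idx)
    {
        long res = translate(addr, idx);
        if (res != -1) {
            return res;
        }
        /* Retry with the unprivileged index of the same translation regime. */
        switch (idx) {
        case E10_1:
        case E10_1_PAN:
            return translate(addr, E10_0);
        case E20_2:
        case E20_2_PAN:
            return translate(addr, E20_0);
        default:
            return -1;
        }
    }

    int main(void)
    {
        printf("%#lx\n", translate_for_debug(0x400123, E10_1)); /* 0x400000 */
        return 0;
    }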