Diffstat (limited to 'target/arm/tcg/m_helper.c')
-rw-r--r--  target/arm/tcg/m_helper.c  33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c
index 6614719..28307b5 100644
--- a/target/arm/tcg/m_helper.c
+++ b/target/arm/tcg/m_helper.c
@@ -632,8 +632,11 @@ void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
     }
 
     /* Note that these stores can throw exceptions on MPU faults */
-    cpu_stl_data_ra(env, sp, nextinst, GETPC());
-    cpu_stl_data_ra(env, sp + 4, saved_psr, GETPC());
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
+                                 arm_to_core_mmu_idx(mmu_idx));
+    cpu_stl_mmu(env, sp, nextinst, oi, GETPC());
+    cpu_stl_mmu(env, sp + 4, saved_psr, oi, GETPC());
 
     env->regs[13] = sp;
     env->regs[14] = 0xfeffffff;
@@ -1048,6 +1051,9 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
     bool s = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
     bool lspact = env->v7m.fpccr[s] & R_V7M_FPCCR_LSPACT_MASK;
     uintptr_t ra = GETPC();
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
+                                 arm_to_core_mmu_idx(mmu_idx));
 
     assert(env->v7m.secure);
 
@@ -1073,7 +1079,7 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
      * Note that we do not use v7m_stack_write() here, because the
      * accesses should not set the FSR bits for stacking errors if they
      * fail. (In pseudocode terms, they are AccType_NORMAL, not AccType_STACK
-     * or AccType_LAZYFP). Faults in cpu_stl_data_ra() will throw exceptions
+     * or AccType_LAZYFP). Faults in cpu_stl_mmu() will throw exceptions
      * and longjmp out.
      */
     if (!(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_LSPEN_MASK)) {
@@ -1089,12 +1095,12 @@ void HELPER(v7m_vlstm)(CPUARMState *env, uint32_t fptr)
             if (i >= 16) {
                 faddr += 8; /* skip the slot for the FPSCR */
             }
-            cpu_stl_data_ra(env, faddr, slo, ra);
-            cpu_stl_data_ra(env, faddr + 4, shi, ra);
+            cpu_stl_mmu(env, faddr, slo, oi, ra);
+            cpu_stl_mmu(env, faddr + 4, shi, oi, ra);
         }
-        cpu_stl_data_ra(env, fptr + 0x40, vfp_get_fpscr(env), ra);
+        cpu_stl_mmu(env, fptr + 0x40, vfp_get_fpscr(env), oi, ra);
         if (cpu_isar_feature(aa32_mve, cpu)) {
-            cpu_stl_data_ra(env, fptr + 0x44, env->v7m.vpr, ra);
+            cpu_stl_mmu(env, fptr + 0x44, env->v7m.vpr, oi, ra);
         }
 
         /*
@@ -1121,6 +1127,9 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
 {
     ARMCPU *cpu = env_archcpu(env);
     uintptr_t ra = GETPC();
+    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
+    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
+                                 arm_to_core_mmu_idx(mmu_idx));
 
     /* fptr is the value of Rn, the frame pointer we load the FP regs from */
     assert(env->v7m.secure);
@@ -1155,16 +1164,16 @@ void HELPER(v7m_vlldm)(CPUARMState *env, uint32_t fptr)
                 faddr += 8; /* skip the slot for the FPSCR and VPR */
             }
 
-            slo = cpu_ldl_data_ra(env, faddr, ra);
-            shi = cpu_ldl_data_ra(env, faddr + 4, ra);
+            slo = cpu_ldl_mmu(env, faddr, oi, ra);
+            shi = cpu_ldl_mmu(env, faddr + 4, oi, ra);
 
             dn = (uint64_t) shi << 32 | slo;
             *aa32_vfp_dreg(env, i / 2) = dn;
         }
-        fpscr = cpu_ldl_data_ra(env, fptr + 0x40, ra);
+        fpscr = cpu_ldl_mmu(env, fptr + 0x40, oi, ra);
         vfp_set_fpscr(env, fpscr);
         if (cpu_isar_feature(aa32_mve, cpu)) {
-            env->v7m.vpr = cpu_ldl_data_ra(env, fptr + 0x44, ra);
+            env->v7m.vpr = cpu_ldl_mmu(env, fptr + 0x44, oi, ra);
         }
     }
 
@@ -1937,7 +1946,7 @@ static bool do_v7m_function_return(ARMCPU *cpu)
          * do them as secure, so work out what MMU index that is.
          */
         mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
-        oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx));
+        oi = make_memop_idx(MO_LEUL | MO_ALIGN, arm_to_core_mmu_idx(mmu_idx));
         newpc = cpu_ldl_mmu(env, frameptr, oi, 0);
         newpsr = cpu_ldl_mmu(env, frameptr + 4, oi, 0);
 
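
Every hunk above makes the same change: rather than the cpu_{ld,st}l_data_ra() accessors, which pick up the MMU index implicitly, the helper now builds a MemOpIdx once from the current Arm MMU index and passes it to cpu_ldl_mmu()/cpu_stl_mmu(), with MO_ALIGN requesting an alignment check. The sketch below is illustrative only and is not part of the patch; it assumes the declarations already in scope in m_helper.c (CPUARMState, ARMMMUIdx, MemOpIdx, arm_mmu_idx(), arm_to_core_mmu_idx(), make_memop_idx(), cpu_ldl_mmu(), cpu_stl_mmu()), and the helper name is hypothetical.

/*
 * Illustrative sketch (hypothetical helper, not from the patch):
 * the load/store pattern the converted code above uses.
 */
static void v7m_sketch_copy_word_pair(CPUARMState *env, uint32_t dst,
                                      uint32_t src, uintptr_t ra)
{
    /* Resolve the MMU index for the current CPU state once... */
    ARMMMUIdx mmu_idx = arm_mmu_idx(env);
    /* ...and fold it, together with the memop, into one MemOpIdx. */
    MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN,
                                 arm_to_core_mmu_idx(mmu_idx));
    uint32_t lo, hi;

    /*
     * 32-bit, target-endian, alignment-checked accesses.  MPU faults
     * raise an exception and longjmp out via 'ra' (the GETPC() value
     * of the calling helper), as the converted helpers above rely on.
     */
    lo = cpu_ldl_mmu(env, src, oi, ra);
    hi = cpu_ldl_mmu(env, src + 4, oi, ra);
    cpu_stl_mmu(env, dst, lo, oi, ra);
    cpu_stl_mmu(env, dst + 4, hi, oi, ra);
}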