author     Richard Henderson <richard.henderson@linaro.org>   2021-11-03 00:03:49 -0400
committer  Peter Maydell <peter.maydell@linaro.org>           2021-12-15 10:35:26 +0000
commit     ee03027a2cef00f977a3d28242c0a250b8552495 (patch)
tree       42e74225bdb97be6a66025ec7a5b6eb519ef34ba /target
parent     936a6b86030a0db172b09a1ea953091a1555611e (diff)
target/arm: Take an exception if PC is misaligned
For A64, any input to an indirect branch can cause this.

For A32, many indirect branch paths force the branch to be aligned, but BXWritePC does not. This includes the BX instruction but also other interworking changes to PC. Prior to v8, this case is UNDEFINED. With v8, this is CONSTRAINED UNPREDICTABLE and may either raise an exception or force align the PC. We choose to raise an exception because we have the infrastructure, it makes the generated code for gen_bx simpler, and it has the possibility of catching more guest bugs.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
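As a minimal sketch of the A32 case above (not part of the patch; the function name, signature and out-parameters below are invented for illustration), the interworking write to the PC behaves roughly like this:

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch only: "addr" is the value an interworking branch (BX and friends)
 * writes to the PC.  Bit 0 selects Thumb; in ARM state the PC must be
 * 4-aligned, so an ARM-state target with bit 1 set is the problematic case.
 */
static uint32_t bx_write_pc_sketch(uint32_t addr, bool *thumb, bool *fault)
{
    *fault = false;
    if (addr & 1) {
        *thumb = true;              /* bit 0 set: continue in Thumb state */
        return addr & ~1u;          /* bit 0 is not part of the PC */
    }
    *thumb = false;                 /* ARM state: PC must be 4-aligned */
    if (addr & 2) {
        /*
         * Pre-v8: UNDEFINED.  v8: CONSTRAINED UNPREDICTABLE, i.e. either
         * force-align the PC or take an exception.  The patch picks the
         * exception, raised when the misaligned PC is about to execute.
         */
        *fault = true;
    }
    return addr;
}

With this patch the exception arm of that choice is implemented lazily: the misaligned PC reaches the start of a TB, the new pc & 3 check in the translators fires, and helper_exception_pc_alignment delivers a prefetch abort with the pcalignment syndrome.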
Diffstat (limited to 'target')
-rw-r--r--  target/arm/helper.h        |  1
-rw-r--r--  target/arm/syndrome.h      |  5
-rw-r--r--  target/arm/tlb_helper.c    | 18
-rw-r--r--  target/arm/translate-a64.c | 15
-rw-r--r--  target/arm/translate.c     | 22
5 files changed, 60 insertions(+), 1 deletion(-)
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 448a86e..b463d93 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -47,6 +47,7 @@ DEF_HELPER_FLAGS_3(sel_flags, TCG_CALL_NO_RWG_SE,
DEF_HELPER_2(exception_internal, void, env, i32)
DEF_HELPER_4(exception_with_syndrome, void, env, i32, i32, i32)
DEF_HELPER_2(exception_bkpt_insn, void, env, i32)
+DEF_HELPER_2(exception_pc_alignment, noreturn, env, tl)
DEF_HELPER_1(setend, void, env)
DEF_HELPER_2(wfi, void, env, i32)
DEF_HELPER_1(wfe, void, env)
diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h
index f30f413..8cde8e7 100644
--- a/target/arm/syndrome.h
+++ b/target/arm/syndrome.h
@@ -282,4 +282,9 @@ static inline uint32_t syn_illegalstate(void)
return (EC_ILLEGALSTATE << ARM_EL_EC_SHIFT) | ARM_EL_IL;
}

+static inline uint32_t syn_pcalignment(void)
+{
+ return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL;
+}
+
#endif /* TARGET_ARM_SYNDROME_H */
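As a worked value for the syndrome added above (a sketch, assuming the layout this header already uses: EC in bits [31:26] via ARM_EL_EC_SHIFT, the IL bit at bit 25, and EC_PCALIGNMENT == 0x22):

    /* Hypothetical check, not part of the patch: */
    assert(syn_pcalignment() == ((0x22u << 26) | (1u << 25)));   /* 0x8a000000 */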
diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c
index 4cacb96..b79004e 100644
--- a/target/arm/tlb_helper.c
+++ b/target/arm/tlb_helper.c
@@ -9,6 +9,7 @@
#include "cpu.h"
#include "internals.h"
#include "exec/exec-all.h"
+#include "exec/helper-proto.h"

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
unsigned int target_el,
@@ -134,6 +135,23 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

+void helper_exception_pc_alignment(CPUARMState *env, target_ulong pc)
+{
+ ARMMMUFaultInfo fi = { .type = ARMFault_Alignment };
+ int target_el = exception_target_el(env);
+ int mmu_idx = cpu_mmu_index(env, true);
+ uint32_t fsc;
+
+ env->exception.vaddress = pc;
+
+ /*
+ * Note that the fsc is not applicable to this exception,
+ * since any syndrome is pcalignment not insn_abort.
+ */
+ env->exception.fsr = compute_fsr_fsc(env, &fi, target_el, mmu_idx, &fsc);
+ raise_exception(env, EXCP_PREFETCH_ABORT, syn_pcalignment(), target_el);
+}
+
#if !defined(CONFIG_USER_ONLY)
/*
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 2986fe1..130a9ff 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -14753,6 +14753,7 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
uint64_t pc = s->base.pc_next;
uint32_t insn;

+ /* Singlestep exceptions have the highest priority. */
if (s->ss_active && !s->pstate_ss) {
/* Singlestep state is Active-pending.
* If we're in this state at the start of a TB then either
@@ -14771,6 +14772,20 @@ static void aarch64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
return;
}

+ if (pc & 3) {
+ /*
+ * PC alignment fault. This has priority over the instruction abort
+ * that we would receive from a translation fault via arm_ldl_code.
+ * This should only be possible after an indirect branch, at the
+ * start of the TB.
+ */
+ assert(s->base.num_insns == 1);
+ gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+ s->base.is_jmp = DISAS_NORETURN;
+ s->base.pc_next = QEMU_ALIGN_UP(pc, 4);
+ return;
+ }
+
s->pc_curr = pc;
insn = arm_ldl_code(env, &s->base, pc, s->sctlr_b);
s->insn = insn;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 0103c75..45917c3 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -9555,7 +9555,27 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
uint32_t pc = dc->base.pc_next;
unsigned int insn;

- if (arm_check_ss_active(dc) || arm_check_kernelpage(dc)) {
+ /* Singlestep exceptions have the highest priority. */
+ if (arm_check_ss_active(dc)) {
+ dc->base.pc_next = pc + 4;
+ return;
+ }
+
+ if (pc & 3) {
+ /*
+ * PC alignment fault. This has priority over the instruction abort
+ * that we would receive from a translation fault via arm_ldl_code
+ * (or the execution of the kernelpage entrypoint). This should only
+ * be possible after an indirect branch, at the start of the TB.
+ */
+ assert(dc->base.num_insns == 1);
+ gen_helper_exception_pc_alignment(cpu_env, tcg_constant_tl(pc));
+ dc->base.is_jmp = DISAS_NORETURN;
+ dc->base.pc_next = QEMU_ALIGN_UP(pc, 4);
+ return;
+ }
+
+ if (arm_check_kernelpage(dc)) {
dc->base.pc_next = pc + 4;
return;
}