Diffstat (limited to 'hw/intc/armv7m_nvic.c')
-rw-r--r--  hw/intc/armv7m_nvic.c  | 96
1 file changed, 96 insertions, 0 deletions
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 53b4631..fff6e69 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -655,6 +655,102 @@ void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
do_armv7m_nvic_set_pending(opaque, irq, secure, true);
}

+void armv7m_nvic_set_pending_lazyfp(void *opaque, int irq, bool secure)
+{
+ /*
+ * Pend an exception during lazy FP stacking. This differs
+ * from the usual exception pending because the logic for
+ * whether we should escalate depends on the saved context
+ * in the FPCCR register, not on the current state of the CPU/NVIC.
+ */
+ NVICState *s = (NVICState *)opaque;
+ bool banked = exc_is_banked(irq);
+ VecInfo *vec;
+ bool targets_secure;
+ bool escalate = false;
+ /*
+ * We will only look at bits in fpccr if this is a banked exception
+ * (in which case 'secure' tells us whether it is the S or NS version).
+ * All the bits for the non-banked exceptions are in fpccr_s.
+ */
+ uint32_t fpccr_s = s->cpu->env.v7m.fpccr[M_REG_S];
+ uint32_t fpccr = s->cpu->env.v7m.fpccr[secure];
+
+ assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
+ assert(!secure || banked);
+
+ vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
+
+ targets_secure = banked ? secure : exc_targets_secure(s, irq);
+
+ switch (irq) {
+ case ARMV7M_EXCP_DEBUG:
+ if (!(fpccr_s & R_V7M_FPCCR_MONRDY_MASK)) {
+ /* Ignore DebugMonitor exception */
+ return;
+ }
+ break;
+ case ARMV7M_EXCP_MEM:
+ escalate = !(fpccr & R_V7M_FPCCR_MMRDY_MASK);
+ break;
+ case ARMV7M_EXCP_USAGE:
+ escalate = !(fpccr & R_V7M_FPCCR_UFRDY_MASK);
+ break;
+ case ARMV7M_EXCP_BUS:
+ escalate = !(fpccr_s & R_V7M_FPCCR_BFRDY_MASK);
+ break;
+ case ARMV7M_EXCP_SECURE:
+ escalate = !(fpccr_s & R_V7M_FPCCR_SFRDY_MASK);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (escalate) {
+ /*
+ * Escalate to HardFault: faults that initially targeted Secure
+ * continue to do so, even if HF normally targets NonSecure.
+ */
+ irq = ARMV7M_EXCP_HARD;
+ if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
+ (targets_secure ||
+ !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
+ vec = &s->sec_vectors[irq];
+ } else {
+ vec = &s->vectors[irq];
+ }
+ }
+
+ if (!vec->enabled ||
+ nvic_exec_prio(s) <= exc_group_prio(s, vec->prio, secure)) {
+ if (!(fpccr_s & R_V7M_FPCCR_HFRDY_MASK)) {
+ /*
+ * We want to escalate to HardFault but the context the
+ * FP state belongs to prevents the exception pre-empting.
+ */
+ cpu_abort(&s->cpu->parent_obj,
+ "Lockup: can't escalate to HardFault during "
+ "lazy FP register stacking\n");
+ }
+ }
+
+ if (escalate) {
+ s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
+ }
+ if (!vec->pending) {
+ vec->pending = 1;
+ /*
+ * We do not call nvic_irq_update(), because we know our caller
+ * is going to handle causing us to take the exception by
+ * raising EXCP_LAZYFP, so raising the IRQ line would be
+ * pointless extra work. We just need to recompute the
+ * priorities so that armv7m_nvic_can_take_pending_exception()
+ * returns the right answer.
+ */
+ nvic_recompute_state(s);
+ }
+}
+
/* Make pending IRQ active. */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
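
For reference, here is a minimal sketch of how a caller might use the new helper. This is not part of the commit: the real call sites live in the target/arm lazy FP stacking code, and the function name below is invented purely for illustration.

/*
 * Illustration only, not part of this commit: a hypothetical fault path
 * that detects a MemManage fault while lazily stacking FP registers and
 * pends it through the new helper. Whether it escalates to HardFault is
 * then decided from the FPCCR snapshot (MMRDY and friends), not from the
 * CPU's current execution priority.
 */
static void pend_lazyfp_memmanage(NVICState *s, bool targets_secure)
{
    /*
     * MemManage is a banked exception, so 'targets_secure' selects the
     * Secure or NonSecure version of the vector.
     */
    armv7m_nvic_set_pending_lazyfp(s, ARMV7M_EXCP_MEM, targets_secure);
}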