author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2017-03-09 11:45:37 +1100
committer Stewart Smith <stewart@linux.vnet.ibm.com>         2017-03-09 13:36:35 +1100
commit    47a90f165b105434e5a3ef15b00ad3a7867a991b
tree      21f81ea22b4dc30faa265e3a58c17a10d699d5d6
parent    790ad9fe07a1ba2b55293238e21872ea6d8c740a
xive: Sync HW when moving interrupts around
For now, sync all queues to ensure that any interrupt routed to the old queue has reached memory.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
 hw/xive.c      | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 include/xive.h |  7 +++++++
 2 files changed, 84 insertions(+), 1 deletion(-)
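The new xive_sync() helper added below kicks off the five sync operations by storing to per-type MMIO doorbells in the interrupt-controller (IC) BAR: the second 2K range of the second IC page, one 128-byte slot per sync type. A minimal, self-contained sketch of that address math follows; the ic_base value of 0 and the 64K page size (ic_shift = 16) are assumptions for the demo, while the slot numbers 8..12 come from the out_be64() stores in the diff itself.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t ic_base = 0;        /* hypothetical BAR base for the demo */
	const unsigned int ic_shift = 16;  /* assumed 64K IC pages */

	/* Second 2K range of the second IC page, as in xive_sync() */
	uint64_t p = ic_base + (1ul << ic_shift) + 0x800;

	/* One 128-byte (1 << 7) doorbell slot per sync type; the slot
	 * numbers 8..12 match the stores in the diff below */
	printf("Sync IPI             at 0x%llx\n", (unsigned long long)(p + (8 << 7)));
	printf("Sync HW              at 0x%llx\n", (unsigned long long)(p + (9 << 7)));
	printf("Sync OS escalations  at 0x%llx\n", (unsigned long long)(p + (10 << 7)));
	printf("Sync Hyp escalations at 0x%llx\n", (unsigned long long)(p + (11 << 7)));
	printf("Sync Redistribution  at 0x%llx\n", (unsigned long long)(p + (12 << 7)));
	return 0;
}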
diff --git a/hw/xive.c b/hw/xive.c
index d18bc46..8cc0aa1 100644
--- a/hw/xive.c
+++ b/hw/xive.c
@@ -2325,11 +2325,52 @@ static void xive_update_irq_mask(struct xive_src *s, uint32_t idx, bool masked)
in_be64(mmio_base + offset);
}
+static void xive_sync(struct xive *x)
+{
+ uint64_t r;
+ void *p;
+
+ lock(&x->lock);
+
+ /* Second 2K range of second page */
+ p = x->ic_base + (1 << x->ic_shift) + 0x800;
+
+ /* TODO: Make this more fine-grained */
+ out_be64(p + (10 << 7), 0); /* Sync OS escalations */
+ out_be64(p + (11 << 7), 0); /* Sync Hyp escalations */
+ out_be64(p + (12 << 7), 0); /* Sync Redistribution */
+ out_be64(p + ( 8 << 7), 0); /* Sync IPI */
+ out_be64(p + ( 9 << 7), 0); /* Sync HW */
+
+#define SYNC_MASK \
+ (VC_EQC_CONF_SYNC_IPI | \
+ VC_EQC_CONF_SYNC_HW | \
+ VC_EQC_CONF_SYNC_ESC1 | \
+ VC_EQC_CONF_SYNC_ESC2 | \
+ VC_EQC_CONF_SYNC_REDI)
+
+ /* XXX Add timeout */
+ for (;;) {
+ r = xive_regrx(x, VC_EQC_CONFIG);
+ if ((r & SYNC_MASK) == SYNC_MASK)
+ break;
+ cpu_relax();
+ }
+ xive_regw(x, VC_EQC_CONFIG, r & ~SYNC_MASK);
+
+ /* Work around a HW issue: read back before allowing a new sync */
+ xive_regr(x, VC_GLOBAL_CONFIG);
+
+ unlock(&x->lock);
+}
+
static int64_t xive_source_set_xive(struct irq_source *is, uint32_t isn,
uint16_t server, uint8_t prio)
{
struct xive_src *s = container_of(is, struct xive_src, is);
uint8_t old_prio;
+ uint32_t old_target;
+ uint32_t vp_blk;
int64_t rc;
/*
@@ -2351,7 +2392,7 @@ static int64_t xive_source_set_xive(struct irq_source *is, uint32_t isn,
server >>= 2;
/* Grab existing prio/mask */
- if (!xive_get_irq_targetting(isn, NULL, &old_prio, NULL))
+ if (!xive_get_irq_targetting(isn, &old_target, &old_prio, NULL))
return OPAL_PARAMETER;
/* Let XIVE configure the EQ synchronously */
@@ -2368,6 +2409,21 @@ static int64_t xive_source_set_xive(struct irq_source *is, uint32_t isn,
xive_update_irq_mask(s, isn - s->esb_base,
prio == 0xff);
}
+
+ /*
+ * Synchronize the source and old target XIVEs to ensure that
+ * all pending interrupts to the old target have reached their
+ * respective queue.
+ *
+ * WARNING: This assumes the VP and its queues are on the same
+ * XIVE instance!
+ */
+ xive_sync(s->xive);
+ if (xive_decode_vp(old_target, &vp_blk, NULL, NULL, NULL)) {
+ struct xive *x = xive_from_pc_blk(vp_blk);
+ if (x)
+ xive_sync(x);
+ }
return OPAL_SUCCESS;
}
@@ -3334,6 +3390,7 @@ static int64_t opal_xive_set_irq_config(uint32_t girq,
{
struct irq_source *is = irq_find_source(girq);
struct xive_src *s = container_of(is, struct xive_src, is);
+ uint32_t old_target, vp_blk;
int64_t rc;
/*
@@ -3343,6 +3400,10 @@ static int64_t opal_xive_set_irq_config(uint32_t girq,
if (xive_mode != XIVE_MODE_EXPL)
return OPAL_WRONG_STATE;
+ /* Grab existing target */
+ if (!xive_get_irq_targetting(girq, &old_target, NULL, NULL))
+ return OPAL_PARAMETER;
+
/* Let XIVE configure the EQ. We do the update without the
* synchronous flag, thus a cache update failure will result
* in us returning OPAL_BUSY
@@ -3358,6 +3419,21 @@ static int64_t opal_xive_set_irq_config(uint32_t girq,
/* Ensure it's enabled/disabled in the source controller */
xive_update_irq_mask(s, girq - s->esb_base, prio == 0xff);
+ /*
+ * Synchronize the source and old target XIVEs to ensure that
+ * all pending interrupts to the old target have reached their
+ * respective queue.
+ *
+ * WARNING: This assumes the VP and its queues are on the same
+ * XIVE instance!
+ */
+ xive_sync(s->xive);
+ if (xive_decode_vp(old_target, &vp_blk, NULL, NULL, NULL)) {
+ struct xive *x = xive_from_pc_blk(vp_blk);
+ if (x)
+ xive_sync(x);
+ }
+
return OPAL_SUCCESS;
}
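The completion loop in xive_sync() above polls VC_EQC_CONFIG until all five VC_EQC_CONF_SYNC_* bits are set, and the diff leaves a "/* XXX Add timeout */" marker on it. Below is a hedged sketch of what a bounded version of that loop could look like; read_sync_status(), the poll budget, and the simulated register are hypothetical stand-ins for the demo, not skiboot APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SYNC_MASK 0x1full      /* stand-in for the five VC_EQC_CONF_SYNC_* bits */

static uint64_t fake_vc_eqc_config;    /* simulated register for the demo */

static uint64_t read_sync_status(void)
{
	/* The real code reads the register with xive_regrx(x, VC_EQC_CONFIG);
	 * the simulation simply reports completion on the first poll. */
	fake_vc_eqc_config |= SYNC_MASK;
	return fake_vc_eqc_config;
}

static bool wait_for_sync(unsigned int max_polls)
{
	while (max_polls--) {
		if ((read_sync_status() & SYNC_MASK) == SYNC_MASK)
			return true;
		/* skiboot would cpu_relax() here between polls */
	}
	return false;   /* timed out: the caller can log and recover */
}

int main(void)
{
	printf("sync %s\n", wait_for_sync(1000) ? "complete" : "timed out");
	return 0;
}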
diff --git a/include/xive.h b/include/xive.h
index 58c4da9..06aad8b 100644
--- a/include/xive.h
+++ b/include/xive.h
@@ -180,6 +180,13 @@
#define X_VC_EQC_SCRUB_MASK 0x213
#define VC_EQC_SCRUB_MASK 0x918
#define X_VC_EQC_CWATCH_SPEC 0x215
+#define VC_EQC_CONFIG 0x920
+#define X_VC_EQC_CONFIG 0x214
+#define VC_EQC_CONF_SYNC_IPI PPC_BIT(32)
+#define VC_EQC_CONF_SYNC_HW PPC_BIT(33)
+#define VC_EQC_CONF_SYNC_ESC1 PPC_BIT(34)
+#define VC_EQC_CONF_SYNC_ESC2 PPC_BIT(35)
+#define VC_EQC_CONF_SYNC_REDI PPC_BIT(36)
#define VC_EQC_CWATCH_SPEC 0x928
#define VC_EQC_CWATCH_CONFLICT PPC_BIT(0)
#define VC_EQC_CWATCH_FULL PPC_BIT(8)
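The VC_EQC_CONF_SYNC_* definitions above use PPC_BIT(), which follows the Power MSB0 convention: bit 0 is the most significant bit of the 64-bit register, so bits 32..36 land in the low word. A small self-contained check of the resulting mask values; the PPC_BIT() definition here mirrors the one in skiboot's include/bitutils.h.

#include <stdint.h>
#include <stdio.h>

/* MSB0 numbering, as in skiboot's bitutils.h */
#define PPC_BIT(bit) (0x8000000000000000ul >> (bit))

int main(void)
{
	printf("VC_EQC_CONF_SYNC_IPI  = 0x%016llx\n", (unsigned long long)PPC_BIT(32));
	printf("VC_EQC_CONF_SYNC_HW   = 0x%016llx\n", (unsigned long long)PPC_BIT(33));
	printf("VC_EQC_CONF_SYNC_ESC1 = 0x%016llx\n", (unsigned long long)PPC_BIT(34));
	printf("VC_EQC_CONF_SYNC_ESC2 = 0x%016llx\n", (unsigned long long)PPC_BIT(35));
	printf("VC_EQC_CONF_SYNC_REDI = 0x%016llx\n", (unsigned long long)PPC_BIT(36));
	return 0;
}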