author     Benjamin Herrenschmidt <benh@kernel.crashing.org>    2017-12-12 16:22:54 +1100
committer  Stewart Smith <stewart@linux.vnet.ibm.com>           2017-12-12 18:39:43 -0600
commit     c2a6e4f3226aee3e5164bfb9f85956e7bd33a1db (patch)
tree       cf5b9813220ca2667aedb1d1413c4f71db32df48
parent     b390b6b9d061eb1cd2e20b981d4649549f74f3f0 (diff)
xive: Fix occasional VC checkstops in xive_reset
The current workaround for the scrub bug described in __xive_cache_scrub()
has an issue in that it can leave dirty invalid entries in the cache.

When cleaning up EQs or VPs during reset, if we then remove the underlying
indirect page for these entries, the XIVE will checkstop when trying to
flush them out of the cache.

This replaces the existing workaround with a new pair of workarounds for
VPs and EQs:

 - The VP one does the dummy watch on another entry than the one we
   scrubbed (which does the job of pushing old stores out) using an
   entry that is known to be backed by a permanent indirect page.

 - The EQ one switches to a more efficient workaround which consists
   of doing a non-side-effect ESB load from the EQ's ESe control bits.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
(cherry picked from commit 1e00d546ca29d5eb9c51558250aa37facf8f2afb)
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
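For readers unfamiliar with the ESB MMIO convention the EQ workaround relies on: each ESB exposes an MMIO page on which a load at offset 0x800 returns the current PQ state without changing it, while loads at the 0xc00-0xf00 offsets read the state and then set it. The sketch below only illustrates that convention; the constant names and the mmio_read64()/esb_peek() helpers are made up for illustration and are not skiboot identifiers.

#include <stdint.h>

/* Illustrative ESB MMIO offsets.  Offset 0x800 is the "get" (no side
 * effect) load the patch uses; the identifiers are invented for this
 * sketch and are not skiboot names.
 */
#define ESB_GET		0x800	/* load PQ bits, no side effect */
#define ESB_SET_PQ_00	0xc00	/* load PQ bits, then set them to 00 */

/* Stand-in for skiboot's big-endian MMIO load (in_be64()). */
static inline uint64_t mmio_read64(const volatile void *addr)
{
	return *(const volatile uint64_t *)addr;
}

/* A non-side-effect ESB load: the value read reflects the ESB state and
 * nothing is modified.  For the EQ workaround the value itself does not
 * matter; per the patch, what matters is that once the load has returned,
 * the preceding stores have been pushed out of the XIVE store queue.
 */
static uint64_t esb_peek(const volatile void *esb_mmio_page)
{
	return mmio_read64((const volatile char *)esb_mmio_page + ESB_GET);
}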
-rw-r--r--   hw/xive.c   57
1 file changed, 54 insertions, 3 deletions
diff --git a/hw/xive.c b/hw/xive.c
index f708cf3..b26d26c 100644
--- a/hw/xive.c
+++ b/hw/xive.c
@@ -1244,6 +1244,52 @@ static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
void *new_data, bool light_watch,
bool synchronous);
+static void xive_scrub_workaround_vp(struct xive *x, uint32_t block, uint32_t idx __unused)
+{
+ /* VP variant of the workaround described in __xive_cache_scrub(),
+ * we need to be careful to use for that workaround an NVT that
+ * sits on the same xive but is NOT part of a donated indirect
+ * entry.
+ *
+ * The reason is that the dummy cache watch will re-create a
+ * dirty entry in the cache, even if the entry is marked
+ * invalid.
+ *
+ * Thus if we are about to dispose of the indirect entry backing
+ * it, we'll cause a checkstop later on when trying to write it
+ * out.
+ *
+ * Note: This means the workaround only works for block group
+ * mode.
+ */
+#ifdef USE_BLOCK_GROUP_MODE
+ __xive_cache_watch(x, xive_cache_vpc, block, INITIAL_VP_BASE, 0,
+ 0, NULL, true, false);
+#else
+ /* WARNING: Some workarounds related to cache scrubs require us to
+ * have at least one firmware owned (permanent) indirect entry for
+ * each XIVE instance. This currently only happens in block group
+ * mode
+ */
+#warning Block group mode should not be disabled
+#endif
+}
+
+static void xive_scrub_workaround_eq(struct xive *x, uint32_t block __unused, uint32_t idx)
+{
+ void *mmio;
+
+ /* EQ variant of the workaround described in __xive_cache_scrub(),
+ * a simple non-side effect load from ESn will do
+ */
+ mmio = x->eq_mmio + idx * 0x20000;
+
+ /* Do the load and wait for it to return before we do anything
+ * else; by then the XIVE store queue is completely empty
+ */
+ load_wait(in_be64(mmio + 0x800));
+}
+
static int64_t __xive_cache_scrub(struct xive *x, enum xive_cache_type ctype,
uint64_t block, uint64_t idx,
bool want_inval, bool want_disable)
@@ -1263,6 +1309,9 @@ static int64_t __xive_cache_scrub(struct xive *x, enum xive_cache_type ctype,
* invalidate, then after the scrub, we do a dummy cache
* watch which will make the HW read the data back, which
* should be ordered behind all the preceding stores.
+ *
+ * Update: For EQs we can do a non-side effect ESB load instead
+ * which is faster.
*/
want_inval = true;
@@ -1324,9 +1373,11 @@ static int64_t __xive_cache_scrub(struct xive *x, enum xive_cache_type ctype,
/* Workaround for HW bug described above (only applies to
* EQC and VPC
*/
- if (ctype == xive_cache_eqc || ctype == xive_cache_vpc)
- __xive_cache_watch(x, ctype, block, idx, 0, 0, NULL,
- true, false);
+ if (ctype == xive_cache_eqc)
+ xive_scrub_workaround_eq(x, block, idx);
+ else if (ctype == xive_cache_vpc)
+ xive_scrub_workaround_vp(x, block, idx);
+
return 0;
}
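A note on the load_wait() used in xive_scrub_workaround_eq(): issuing the MMIO load is not enough, the CPU also has to stall until the load value has actually come back, otherwise execution could continue before the flush is complete. The canonical PowerPC construct for that is a twi/isync pair with a data dependency on the loaded value; the sketch below shows that idiom under the assumption that skiboot's load_wait() is this kind of helper (check skiboot's headers for the real definition).

#include <stdint.h>

/* Canonical PowerPC "wait for this load to complete" idiom: twi 0,rX,0
 * never traps but depends on the loaded value, and the isync prevents
 * later instructions from starting until the twi has executed, so the
 * load is globally performed before we move on.  Sketch only; skiboot's
 * actual load_wait() may differ in detail.
 */
static inline void load_wait(uint64_t data)
{
	asm volatile("twi 0,%0,0; isync" : : "r"(data) : "memory");
}

In the patch this wraps the in_be64() ESB load, so that by the time xive_scrub_workaround_eq() returns, the load (and with it the drain of the XIVE store queue) has completed.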