author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2017-09-10 17:36:03 +1000
---|---|---
committer | Stewart Smith <stewart@linux.vnet.ibm.com> | 2017-09-12 22:52:08 -0500
commit | e3c2498551fb8ef7db317d501f935a8a2e5d37cc (patch) |
tree | 0f30566cf09a4686b72505c9ecbe35ff64eda8cf /hw |
parent | e608f9ee6b080ddb6a0e59b91549f25d17f7dac5 (diff) |
xive: Fix locking around cache scrub & watch
Thankfully, the missing locking only affects debug code and init
code that doesn't run concurrently. This also adds a DEBUG option
that checks that the lock is properly held.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
Diffstat (limited to 'hw')
-rw-r--r-- | hw/xive.c | 19 |
1 file changed, 19 insertions(+), 0 deletions(-)
```diff
--- a/hw/xive.c
+++ b/hw/xive.c
@@ -45,11 +45,13 @@
 #define XIVE_PERCPU_LOG
 #define XIVE_DEBUG_INIT_CACHE_UPDATES
 #define XIVE_EXTRA_CHECK_INIT_CACHE
+#define XIVE_CHECK_LOCKS
 #else
 #undef XIVE_DEBUG_DUPLICATES
 #undef XIVE_PERCPU_LOG
 #undef XIVE_DEBUG_INIT_CACHE_UPDATES
 #undef XIVE_EXTRA_CHECK_INIT_CACHE
+#undef XIVE_CHECK_LOCKS
 #endif
 
 /*
@@ -1245,6 +1247,10 @@ static int64_t __xive_cache_scrub(struct xive *x, enum xive_cache_type ctype,
 	uint64_t sreg, sregx, mreg, mregx;
 	uint64_t mval, sval;
 
+#ifdef XIVE_CHECK_LOCKS
+	assert(lock_held_by_me(&x->lock));
+#endif
+
 	/* Workaround a HW bug in XIVE where the scrub completion
 	 * isn't ordered by loads, thus the data might still be
 	 * in a queue and may not have reached coherency.
@@ -1341,6 +1347,9 @@ static int64_t __xive_cache_watch(struct xive *x, enum xive_cache_type ctype,
 	uint64_t dval0, sval, status;
 	int64_t i;
 
+#ifdef XIVE_CHECK_LOCKS
+	assert(lock_held_by_me(&x->lock));
+#endif
 	switch (ctype) {
 	case xive_cache_eqc:
 		sreg = VC_EQC_CWATCH_SPEC;
@@ -3016,6 +3025,7 @@ static void xive_setup_hw_for_emu(struct xive_cpu_state *xs)
 			  xs->eq_page, XIVE_EMULATION_PRIO);
 
 	/* Use the cache watch to write it out */
+	lock(&x_eq->lock);
 	xive_eqc_cache_update(x_eq, xs->eq_blk,
 			      xs->eq_idx + XIVE_EMULATION_PRIO,
 			      0, 4, &eq, false, true);
@@ -3023,14 +3033,17 @@ static void xive_setup_hw_for_emu(struct xive_cpu_state *xs)
 
 	/* Extra testing of cache watch & scrub facilities */
 	xive_special_cache_check(x_vp, xs->vp_blk, xs->vp_idx);
+	unlock(&x_eq->lock);
 
 	/* Initialize/enable the VP */
 	xive_init_default_vp(&vp, xs->eq_blk, xs->eq_idx);
 
 	/* Use the cache watch to write it out */
+	lock(&x_vp->lock);
 	xive_vpc_cache_update(x_vp, xs->vp_blk, xs->vp_idx,
 			      0, 8, &vp, false, true);
 	xive_check_vpc_update(x_vp, xs->vp_idx, &vp);
+	unlock(&x_vp->lock);
 }
 
 static void xive_init_cpu_emulation(struct xive_cpu_state *xs,
@@ -3075,8 +3088,10 @@ static void xive_init_cpu_exploitation(struct xive_cpu_state *xs)
 	xive_init_default_vp(&vp, xs->eq_blk, xs->eq_idx);
 
 	/* Use the cache watch to write it out */
+	lock(&x_vp->lock);
 	xive_vpc_cache_update(x_vp, xs->vp_blk, xs->vp_idx,
 			      0, 8, &vp, false, true);
+	unlock(&x_vp->lock);
 
 	/* Clenaup remaining state */
 	xs->cppr = 0;
@@ -3263,9 +3278,11 @@ static uint32_t xive_read_eq(struct xive_cpu_state *xs, bool just_peek)
 			xs->eqbuf[(xs->eqptr + 2) & xs->eqmsk],
 			xs->eqbuf[(xs->eqptr + 3) & xs->eqmsk],
 			xs->eqgen, xs->eqptr, just_peek);
+		lock(&xs->xive->lock);
 		__xive_cache_scrub(xs->xive, xive_cache_eqc, xs->eq_blk,
 				   xs->eq_idx + XIVE_EMULATION_PRIO,
 				   false, false);
+		unlock(&xs->xive->lock);
 		eq = xive_get_eq(xs->xive, xs->eq_idx + XIVE_EMULATION_PRIO);
 		prerror("EQ @%p W0=%08x W1=%08x qbuf @%p\n",
 			eq, eq->w0, eq->w1, xs->eqbuf);
@@ -3503,9 +3520,11 @@ static int64_t opal_xive_get_xirr(uint32_t *out_xirr, bool just_poll)
 #ifdef XIVE_PERCPU_LOG
 	{
 		struct xive_eq *eq;
+		lock(&xs->xive->lock);
 		__xive_cache_scrub(xs->xive, xive_cache_eqc, xs->eq_blk,
 				   xs->eq_idx + XIVE_EMULATION_PRIO,
 				   false, false);
+		unlock(&xs->xive->lock);
 		eq = xive_get_eq(xs->xive, xs->eq_idx + XIVE_EMULATION_PRIO);
 		log_add(xs, LOG_TYPE_EQD, 2, eq->w0, eq->w1);
 	}
```
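The pattern the patch enforces is worth spelling out: `__xive_cache_scrub()` and `__xive_cache_watch()` now expect their callers to hold the per-chip `x->lock`, and with `XIVE_CHECK_LOCKS` defined in debug builds they assert it via `lock_held_by_me()`. Below is a minimal, self-contained sketch of the same idea in portable C using pthreads; `struct xlock`, `xlock_held_by_me()`, `cache_scrub()` and the `CHECK_LOCKS` macro are illustrative stand-ins, not skiboot's actual lock primitives.

```c
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for skiboot's lock: it also remembers which thread
 * currently owns it, so ownership can be asserted later. */
struct xlock {
	pthread_mutex_t mutex;
	pthread_t owner;
	bool held;
};

static void xlock_lock(struct xlock *l)
{
	pthread_mutex_lock(&l->mutex);
	l->owner = pthread_self();
	l->held = true;
}

static void xlock_unlock(struct xlock *l)
{
	l->held = false;
	pthread_mutex_unlock(&l->mutex);
}

/* Rough equivalent of skiboot's lock_held_by_me() */
static bool xlock_held_by_me(struct xlock *l)
{
	return l->held && pthread_equal(l->owner, pthread_self());
}

/* Like __xive_cache_scrub(): requires the caller to hold the lock.
 * The check is compiled in only when CHECK_LOCKS is defined, so
 * non-debug builds pay nothing for it. */
static void cache_scrub(struct xlock *l)
{
#ifdef CHECK_LOCKS
	assert(xlock_held_by_me(l));
#endif
	puts("scrubbing under lock");
}

int main(void)
{
	struct xlock l = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	xlock_lock(&l);		/* what the patch adds at each call site */
	cache_scrub(&l);
	xlock_unlock(&l);
	return 0;
}
```

Build with something like `cc -pthread -DCHECK_LOCKS sketch.c`. Gating the assertion behind a compile-time option mirrors `XIVE_CHECK_LOCKS` above: normal builds keep the ownership check out of hot paths, while debug builds catch any call site that forgot to take the lock.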