aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2018-08-15 15:10:37 +1000
committerStewart Smith <stewart@linux.ibm.com>2018-08-16 18:41:17 +1000
commit2925dd08c5e39e5c82286dc65a15fce6623694b2 (patch)
treed47b5fa58a5c50703ec250193de9dafe4c49d1b0
parentd3bb756b2d98ed224a74301bb389cc797115013b (diff)
downloadskiboot-2925dd08c5e39e5c82286dc65a15fce6623694b2.zip
skiboot-2925dd08c5e39e5c82286dc65a15fce6623694b2.tar.gz
skiboot-2925dd08c5e39e5c82286dc65a15fce6623694b2.tar.bz2
lock: Move code around
This moves __try_lock() and lock_timeout() as a preparation for the next patch. No code change. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
-rw-r--r--core/lock.c78
1 file changed, 39 insertions, 39 deletions
diff --git a/core/lock.c b/core/lock.c
index c320f2c..1fc71a9 100644
--- a/core/lock.c
+++ b/core/lock.c
@@ -63,6 +63,45 @@ static void unlock_check(struct lock *l)
lock_error(l, "Releasing lock we don't hold depth", 4);
}
+static inline bool __nomcount __try_lock(struct cpu_thread *cpu, struct lock *l)
+{
+ uint64_t val;
+
+ val = cpu->pir;
+ val <<= 32;
+ val |= 1;
+
+ barrier();
+ if (__cmpxchg64(&l->lock_val, 0, val) == 0) {
+ sync();
+ return true;
+ }
+ return false;
+}
+
+#define LOCK_TIMEOUT_MS 5000
+static inline bool lock_timeout(unsigned long start)
+{
+ /* Print warning if lock has been spinning for more than TIMEOUT_MS */
+ unsigned long wait = tb_to_msecs(mftb());
+
+ if (wait - start > LOCK_TIMEOUT_MS) {
+ /*
+ * If the timebase is invalid, we shouldn't
+ * throw an error. This is possible with pending HMIs
+ * that need to recover TB.
+ */
+ if( !(mfspr(SPR_TFMR) & SPR_TFMR_TB_VALID))
+ return false;
+ prlog(PR_WARNING, "WARNING: Lock has been "\
+ "spinning for %lums\n", wait - start);
+ backtrace();
+ return true;
+ }
+
+ return false;
+}
+#else
/* Find circular dependencies in the lock requests. */
static bool check_deadlock(void)
{
@@ -132,29 +171,6 @@ static void remove_lock_request(void)
{
this_cpu()->requested_lock = NULL;
}
-
-#define LOCK_TIMEOUT_MS 5000
-static inline bool lock_timeout(unsigned long start)
-{
- /* Print warning if lock has been spinning for more than TIMEOUT_MS */
- unsigned long wait = tb_to_msecs(mftb());
-
- if (wait - start > LOCK_TIMEOUT_MS) {
- /*
- * If the timebase is invalid, we shouldn't
- * throw an error. This is possible with pending HMIs
- * that need to recover TB.
- */
- if( !(mfspr(SPR_TFMR) & SPR_TFMR_TB_VALID))
- return false;
- prlog(PR_WARNING, "WARNING: Lock has been "\
- "spinning for %lums\n", wait - start);
- backtrace();
- return true;
- }
-
- return false;
-}
#else
static inline void lock_check(struct lock *l) { };
static inline void unlock_check(struct lock *l) { };
@@ -170,22 +186,6 @@ bool lock_held_by_me(struct lock *l)
return l->lock_val == ((pir64 << 32) | 1);
}
-static inline bool __try_lock(struct cpu_thread *cpu, struct lock *l)
-{
- uint64_t val;
-
- val = cpu->pir;
- val <<= 32;
- val |= 1;
-
- barrier();
- if (__cmpxchg64(&l->lock_val, 0, val) == 0) {
- sync();
- return true;
- }
- return false;
-}
-
bool try_lock_caller(struct lock *l, const char *owner)
{
struct cpu_thread *cpu = this_cpu();