author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2017-12-20 13:16:23 +1100
committer  Stewart Smith <stewart@linux.vnet.ibm.com>          2017-12-20 22:15:36 -0600
commit     76d9bcdca58936d761458f8f05960239c4dd8dec (patch)
tree       c8377b11be33e62d312810645b8044d3a6f427aa /include
parent     ca612b802adac0c72cd0f10c51a51275e5914101 (diff)
lock: Add additional lock auditing code
Keep track of the lock owner name and replace the lock_depth counter with a per-cpu list of locks held by the cpu. This allows us to print the actual locks held in case we hit the (in)famous message about opal_pollers being run with a lock held. It also allows us to warn (and drop them) if locks are still held when returning to the OS or completing a scheduled job.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
[stewart: fix unit tests]
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
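The mechanism is easiest to see in miniature. Below is a toy, single-threaded C sketch of the idea: each lock carries an owner string and is linked onto a list of held locks, which can then be dumped or force-released with a warning. Everything here (toy_lock, held_locks, toy_drop_all, the singly-linked list standing in for the ccan list and per-cpu structures) is invented for illustration; it is not the patch's actual core/lock.c code.

#include <stdio.h>
#include <stdbool.h>

/* A toy lock: the owner string and list linkage are the two fields this
 * patch adds to skiboot's struct lock; the hand-rolled list below stands
 * in for the ccan list used by the real code. */
struct toy_lock {
	bool taken;
	const char *owner;	/* who took the lock, NULL when free */
	struct toy_lock *next;	/* linkage in the held-locks list */
};

/* Stand-in for the per-cpu locks_held list added to struct cpu_thread. */
static struct toy_lock *held_locks;

/* The real code records __FILE__ ":" __LINE__ via LOCK_CALLER; the
 * calling function's name is enough for this toy. */
#define toy_acquire(l) toy_acquire_caller((l), __func__)

static void toy_acquire_caller(struct toy_lock *l, const char *caller)
{
	l->taken = true;
	l->owner = caller;
	l->next = held_locks;
	held_locks = l;
}

static void toy_release(struct toy_lock *l)
{
	struct toy_lock **p;

	/* Unlink from the held-locks list, then mark the lock free. */
	for (p = &held_locks; *p; p = &(*p)->next) {
		if (*p == l) {
			*p = l->next;
			break;
		}
	}
	l->taken = false;
	l->owner = NULL;
}

/* Analogue of drop_my_locks(true): warn about, then release, anything
 * still held, e.g. before returning to the OS. */
static void toy_drop_all(void)
{
	while (held_locks) {
		fprintf(stderr, "Lock still held, taken by %s\n",
			held_locks->owner);
		toy_release(held_locks);
	}
}

int main(void)
{
	struct toy_lock a = { 0 }, b = { 0 };

	toy_acquire(&a);
	toy_acquire(&b);
	toy_release(&b);
	toy_drop_all();		/* warns that 'a' was never released */
	return 0;
}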
Diffstat (limited to 'include')
-rw-r--r--  include/cpu.h    2
-rw-r--r--  include/lock.h  33
2 files changed, 29 insertions(+), 6 deletions(-)
diff --git a/include/cpu.h b/include/cpu.h
index e3bc75f..bec4b03 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -61,8 +61,8 @@ struct cpu_thread {
uint64_t save_r1;
void *icp_regs;
uint32_t in_opal_call;
- uint32_t lock_depth;
uint32_t con_suspend;
+ struct list_head locks_held;
bool con_need_flush;
bool quiesce_opal_call;
bool in_mcount;
diff --git a/include/lock.h b/include/lock.h
index 7bdfca2..b187573 100644
--- a/include/lock.h
+++ b/include/lock.h
@@ -20,6 +20,8 @@
#include <stdbool.h>
#include <processor.h>
#include <cmpxchg.h>
+#include <ccan/list/list.h>
+#include <ccan/str/str.h>
struct lock {
/* Lock value has bit 63 as lock bit and the PIR of the owner
@@ -32,10 +34,19 @@ struct lock {
* in which case taking it will suspend console flushing
*/
bool in_con_path;
+
+ /* file/line of lock owner */
+ const char *owner;
+
+ /* linkage in per-cpu list of owned locks */
+ struct list_node list;
};
-/* Initializer */
-#define LOCK_UNLOCKED { .lock_val = 0, .in_con_path = 0 }
+/* Initializer... not ideal but works for now. If we need different
+ * values for the fields and/or start getting warnings we'll have to
+ * play macro tricks
+ */
+#define LOCK_UNLOCKED { 0 }
/* Note vs. libc and locking:
*
@@ -65,8 +76,14 @@ static inline void init_lock(struct lock *l)
*l = (struct lock)LOCK_UNLOCKED;
}
-extern bool try_lock(struct lock *l);
-extern void lock(struct lock *l);
+#define LOCK_CALLER __FILE__ ":" stringify(__LINE__)
+
+#define try_lock(l) try_lock_caller(l, LOCK_CALLER)
+#define lock(l) lock_caller(l, LOCK_CALLER)
+#define lock_recursive(l) lock_recursive_caller(l, LOCK_CALLER)
+
+extern bool try_lock_caller(struct lock *l, const char *caller);
+extern void lock_caller(struct lock *l, const char *caller);
extern void unlock(struct lock *l);
extern bool lock_held_by_me(struct lock *l);
@@ -77,9 +94,15 @@ extern bool lock_held_by_me(struct lock *l);
* returns false if the lock was already held by this cpu. If it returns
* true, then the caller shall release it when done.
*/
-extern bool lock_recursive(struct lock *l);
+extern bool lock_recursive_caller(struct lock *l, const char *caller);
/* Called after per-cpu data structures are available */
extern void init_locks(void);
+/* Dump the list of locks held by this CPU */
+extern void dump_locks_list(void);
+
+/* Clean all locks held by CPU (and warn if any) */
+extern void drop_my_locks(bool warn);
+
#endif /* __LOCK_H */
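For reference, the LOCK_CALLER macro above works because stringify() (provided by the newly included ccan/str/str.h) expands its argument before stringifying it. A minimal standalone illustration follows; the two stringify macros are reproduced here only so the example compiles on its own, and the printed path/line are whatever the example file happens to be.

#include <stdio.h>

/* Two-level expansion, as in ccan/str/str.h, so __LINE__ is replaced by
 * its numeric value before being turned into a string literal. */
#define stringify_1(expr) #expr
#define stringify(expr) stringify_1(expr)

#define LOCK_CALLER __FILE__ ":" stringify(__LINE__)

int main(void)
{
	/* With lock(l) defined as lock_caller(l, LOCK_CALLER), this is the
	 * string that ends up recorded as the lock owner, e.g. "demo.c:13". */
	printf("%s\n", LOCK_CALLER);
	return 0;
}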