author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    2014-11-18 14:57:33 +1100
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>    2014-11-18 14:57:33 +1100
commit    8f41f30053e2b1431703f3c7e19dc8bf7107b19a (patch)
tree      1e8112eb729e10bf96bb713029d2c3e7e0af7436
parent    20410452b956edaf7790803d441768fbf707b36c (diff)
Capture backtraces when measuring stack depth
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--  asm/asm-offsets.c     3
-rw-r--r--  asm/head.S            3
-rw-r--r--  core/cpu.c            2
-rw-r--r--  core/stack.c         37
-rw-r--r--  include/cpu.h         6
-rw-r--r--  include/stack.h       4
6 files changed, 38 insertions, 17 deletions
diff --git a/asm/asm-offsets.c b/asm/asm-offsets.c
index 4fb2344..74f3124 100644
--- a/asm/asm-offsets.c
+++ b/asm/asm-offsets.c
@@ -37,10 +37,11 @@ int main(void)
OFFSET(CPUTHREAD_STATE, cpu_thread, state);
OFFSET(CPUTHREAD_CUR_TOKEN, cpu_thread, current_token);
DEFINE(CPUTHREAD_GAP, sizeof(struct cpu_thread) + STACK_SAFETY_GAP);
+#ifdef STACK_CHECK_ENABLED
OFFSET(CPUTHREAD_STACK_BOT_MARK, cpu_thread, stack_bot_mark);
OFFSET(CPUTHREAD_STACK_BOT_PC, cpu_thread, stack_bot_pc);
OFFSET(CPUTHREAD_STACK_BOT_TOK, cpu_thread, stack_bot_tok);
-
+#endif
OFFSET(STACK_TYPE, stack_frame, type);
OFFSET(STACK_LOCALS, stack_frame, locals);
OFFSET(STACK_GPR0, stack_frame, gpr[0]);
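
Note: the asm-offsets.c hunk wraps the three stack-check offsets in #ifdef STACK_CHECK_ENABLED so that builds without stack checking do not export offsets for fields that no longer exist in struct cpu_thread. For readers unfamiliar with the mechanism, here is a minimal sketch of how OFFSET()/DEFINE() style macros typically work; these definitions are illustrative stand-ins, not skiboot's exact ones:

/* Illustrative stand-ins for the OFFSET()/DEFINE() macros used above
 * (assumption: the real definitions are equivalent in effect but may
 * differ in exact asm syntax).  Each invocation makes the compiler
 * emit the constant into its assembly output, which a build step
 * turns into a header so that head.S can use e.g.
 * CPUTHREAD_STACK_BOT_MARK(%r13) as a load/store displacement. */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n#define " #sym " %0" : : "i" (val))
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

struct toy { long a; long b; };		/* toy struct for illustration */

int main(void)
{
	OFFSET(TOY_B, toy, b);		/* emits the byte offset of toy.b */
	return 0;
}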
diff --git a/asm/head.S b/asm/head.S
index 75eb170..c4d5240 100644
--- a/asm/head.S
+++ b/asm/head.S
@@ -311,10 +311,11 @@ boot_entry:
/* Get our per-cpu pointer into r13 */
GET_CPU()
+#ifdef STACK_CHECK_ENABLED
/* Initialize stack bottom mark to 0, it will be updated in C code */
li %r0,0
std %r0,CPUTHREAD_STACK_BOT_MARK(%r13)
-
+#endif
/* Jump to C */
mr %r3,%r27
mr %r4,%r25
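
Note: the head.S change guards the early zeroing of stack_bot_mark the same way. A zero mark acts as a "not set up yet" sentinel: check_stacks() skips threads whose mark is still zero, and init_cpu_thread() later raises the field to LONG_MAX so the first real measurement is always recorded. A rough C equivalent of the assembly, using a hypothetical helper name:

/* Sketch only: rough C equivalent of the two instructions above.
 * boot_clear_stack_mark() is a hypothetical name; in skiboot this
 * runs in head.S right after GET_CPU() has loaded the per-cpu
 * pointer into r13. */
#ifdef STACK_CHECK_ENABLED
static inline void boot_clear_stack_mark(struct cpu_thread *me)
{
	me->stack_bot_mark = 0;	/* sentinel: no measurement recorded yet */
}
#endif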
diff --git a/core/cpu.c b/core/cpu.c
index 5e60da2..1e405f8 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -339,7 +339,9 @@ static void init_cpu_thread(struct cpu_thread *t,
list_head_init(&t->job_queue);
t->state = state;
t->pir = pir;
+#ifdef STACK_CHECK_ENABLED
t->stack_bot_mark = LONG_MAX;
+#endif
assert(pir == container_of(t, struct cpu_stack, cpu) - cpu_stacks);
}
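
Note: init_cpu_thread() now primes the mark to LONG_MAX (only when stack checking is compiled in), so the first sample taken by the mcount hook is guaranteed to be lower and gets recorded together with its backtrace. The mark counts bytes still left on the thread's stack, so smaller means deeper. A minimal, stand-alone sketch of that low-water-mark convention:

/* Minimal sketch of the low-water-mark convention used by the stack
 * checker: the mark is "bytes still free", LONG_MAX means "no sample
 * yet", and only strictly lower values replace the stored minimum. */
#include <limits.h>
#include <stdint.h>

static void record_low_water(int64_t *low_mark, int64_t bytes_free)
{
	if (bytes_free < *low_mark)	/* new deepest point seen so far */
		*low_mark = bytes_free;
}

/* usage:
 *	int64_t mark = LONG_MAX;
 *	record_low_water(&mark, 3200);	first sample is always recorded
 */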
diff --git a/core/stack.c b/core/stack.c
index 0d8a7eb..9858968 100644
--- a/core/stack.c
+++ b/core/stack.c
@@ -110,6 +110,9 @@ void __noreturn __nomcount __stack_chk_fail(void)
#ifdef STACK_CHECK_ENABLED
+static int64_t lowest_stack_mark = LONG_MAX;
+static struct lock stack_check_lock = LOCK_UNLOCKED;
+
void __nomcount __mcount_stack_check(uint64_t sp, uint64_t lr);
void __nomcount __mcount_stack_check(uint64_t sp, uint64_t lr)
{
@@ -129,9 +132,14 @@ void __nomcount __mcount_stack_check(uint64_t sp, uint64_t lr)
/* Capture lowest stack for this thread */
if (mark < c->stack_bot_mark) {
+ unsigned int count = CPU_BACKTRACE_SIZE;
+ lock(&stack_check_lock);
c->stack_bot_mark = mark;
c->stack_bot_pc = lr;
c->stack_bot_tok = c->current_token;
+ __backtrace(c->stack_bot_bt, &count);
+ c->stack_bot_bt_count = count;
+ unlock(&stack_check_lock);
}
/* Stack is within bounds ? check for warning and bail */
@@ -152,14 +160,17 @@ void __nomcount __mcount_stack_check(uint64_t sp, uint64_t lr)
abort();
}
-static int64_t lowest_stack_mark = LONG_MAX;
-static struct lock stack_check_lock = LOCK_UNLOCKED;
-
void check_stacks(void)
{
struct cpu_thread *c;
- uint64_t lmark, lpc, ltok;
- int found = -1;
+
+ /* We should never call that from mcount */
+ assert(!this_cpu()->in_mcount);
+
+ /* Mark ourselves "in_mcount" to avoid deadlock on stack
+ * check lock
+ */
+ this_cpu()->in_mcount = true;
for_each_cpu(c) {
if (!c->stack_bot_mark ||
@@ -170,15 +181,15 @@ void check_stacks(void)
unlock(&stack_check_lock);
continue;
}
- lmark = lowest_stack_mark = c->stack_bot_mark;
- lpc = c->stack_bot_pc;
- ltok = c->stack_bot_tok;
- found = c->pir;
+ prlog(PR_NOTICE, "CPU %04x lowest stack mark %lld bytes left"
+ " pc=%08llx token=%lld\n",
+ c->pir, c->stack_bot_mark, c->stack_bot_pc,
+ c->stack_bot_tok);
+ __print_backtrace(c->pir, c->stack_bot_bt,
+ c->stack_bot_bt_count, NULL, NULL);
unlock(&stack_check_lock);
}
- if (found >= 0)
- prlog(PR_NOTICE, "CPU %04x lowest stack mark %lld bytes left"
- " pc=%08llx token=%lld\n", found, lmark, lpc, ltok);
-}
+ this_cpu()->in_mcount = false;
+}
#endif /* STACK_CHECK_ENABLED */
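
Note: the substance of the commit is in core/stack.c. When __mcount_stack_check() sees a new per-thread minimum it now records the mark, the caller's link register, the current OPAL token and a backtrace snapshot as one unit under stack_check_lock, and check_stacks() prints that CPU's record (including the backtrace) as soon as a new overall low is found, instead of deferring a single summary line to the end of the scan. Because check_stacks() is itself compiled with mcount instrumentation, it flags in_mcount before touching the lock, so the functions it calls skip the hook rather than deadlock on stack_check_lock. A self-contained toy model of that recursion guard, using plain C stand-ins rather than skiboot's types:

/* Toy model of the in_mcount recursion guard (plain C stand-ins, not
 * skiboot's code).  With -pg instrumentation every function entry
 * calls the hook; if the reporter held the lock and one of its
 * callees re-entered the hook, the hook would block on the same lock
 * forever.  Setting the per-thread flag first makes the hook bail
 * out instead. */
#include <stdbool.h>

static _Thread_local bool in_hook;	/* stands in for cpu_thread's in_mcount */

static void stack_check_hook(void)	/* models __mcount_stack_check() */
{
	if (in_hook)
		return;			/* re-entered: do nothing */
	in_hook = true;
	/* ... lock, record low-water mark, PC, token and backtrace ... */
	in_hook = false;
}

static void report_stacks(void)		/* models check_stacks() */
{
	in_hook = true;			/* silence the hook while printing */
	/* ... lock, print each CPU's mark and backtrace, unlock ... */
	in_hook = false;
}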
diff --git a/include/cpu.h b/include/cpu.h
index 6c8274c..d7acd25 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -22,6 +22,7 @@
#include <lock.h>
#include <device.h>
#include <opal.h>
+#include <stack.h>
/*
* cpu_thread is our internal structure representing each
@@ -60,9 +61,14 @@ struct cpu_thread {
uint32_t hbrt_spec_wakeup; /* primary only */
uint64_t save_l2_fir_action1;
uint64_t current_token;
+#ifdef STACK_CHECK_ENABLED
int64_t stack_bot_mark;
uint64_t stack_bot_pc;
uint64_t stack_bot_tok;
+#define CPU_BACKTRACE_SIZE 20
+ struct bt_entry stack_bot_bt[CPU_BACKTRACE_SIZE];
+ unsigned int stack_bot_bt_count;
+#endif
struct lock job_lock;
struct list_head job_queue;
};
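
Note: the per-thread state backing all of this lives in struct cpu_thread, behind the same #ifdef so non-debug builds carry no extra weight: a fixed 20-entry bt_entry buffer plus a count of valid entries, with stack.h now included for struct bt_entry. A stand-alone sketch of that bounded-snapshot idiom (the toy bt_entry below is an assumption; the real layout is in include/stack.h):

/* Stand-alone sketch of the bounded backtrace snapshot added to
 * struct cpu_thread.  struct bt_entry here is a toy stand-in; the
 * real definition lives in include/stack.h.  Consumers only read
 * bt[0 .. bt_count), and bt_count is never set above the
 * compile-time capacity passed to the capture routine. */
#include <stdint.h>

#define CPU_BACKTRACE_SIZE 20

struct bt_entry { uint64_t sp, pc; };		/* toy stand-in */

struct stack_snapshot {
	struct bt_entry	bt[CPU_BACKTRACE_SIZE];
	unsigned int	bt_count;		/* valid entries in bt[] */
};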
diff --git a/include/stack.h b/include/stack.h
index d4664dd..d6a0609 100644
--- a/include/stack.h
+++ b/include/stack.h
@@ -47,9 +47,9 @@
#define STACK_SAFETY_GAP 512
/* Warning threshold, if stack goes below that on mcount, print a
- * warning
+ * warning.
*/
-#define STACK_WARNING_GAP 1024
+#define STACK_WARNING_GAP 2048
#ifndef __ASSEMBLY__
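
Note: stack.h doubles the warning threshold from 1024 to 2048 bytes of remaining stack, plausibly to gain headroom for the extra work the hook now does, though the commit itself does not say. The two constants after this change, with a rough summary of how they are used:

/* Values from include/stack.h after this commit.  Roughly:
 * STACK_SAFETY_GAP is the slack kept between each thread's
 * struct cpu_thread and the usable stack (see CPUTHREAD_GAP in
 * asm-offsets.c above), and the mcount hook starts warning once the
 * measured free space drops below STACK_WARNING_GAP; the exact
 * warn/abort logic is in core/stack.c and is only summarized here. */
#define STACK_SAFETY_GAP	512
#define STACK_WARNING_GAP	2048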