From 71664fd8d2d2550a56cc6a9c2b81797bfe90d613 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Date: Mon, 17 Nov 2014 18:22:04 +1100
Subject: Stack checking extensions

This patch adds:

 - Normal builds are done with -fstack-protector (we want to investigate
   using -fstack-protector-strong on gcc 4.9, but for now we just use the
   plain variant)

 - Builds with STACK_CHECK=1 use -fstack-protector-all and -pg, and check
   the stack from the mcount hook

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
 core/Makefile.inc |  3 ++
 core/backtrace.c  |  2 +-
 core/cpu.c        | 23 +++++++-------
 core/opal.c       |  3 ++
 core/utils.c      | 90 +++++++++++++++++++++++++++++++++++++++++++++++++++++--
 5 files changed, 107 insertions(+), 14 deletions(-)

(limited to 'core')

diff --git a/core/Makefile.inc b/core/Makefile.inc
index 475e2c7..07dfe75 100644
--- a/core/Makefile.inc
+++ b/core/Makefile.inc
@@ -10,4 +10,7 @@ CORE_OBJS += console-log.o ipmi.o time-utils.o pel.o pool.o errorlog.o
 CORE_OBJS += timer.o i2c.o
 CORE=core/built-in.o
 
+CFLAGS_SKIP_core/relocate.o = -pg -fstack-protector-all
+CFLAGS_SKIP_core/relocate.o += -fstack-protector -fstack-protector-strong
+
 $(CORE): $(CORE_OBJS:%=core/%)
diff --git a/core/backtrace.c b/core/backtrace.c
index 2d05d3d..c4c8546 100644
--- a/core/backtrace.c
+++ b/core/backtrace.c
@@ -24,7 +24,7 @@ static char backtrace_buffer[STACK_BUF_SZ];
 
 /* Dumps backtrace to buffer */
-void __backtrace(char *bt_buf, int bt_buf_len)
+void __nomcount __backtrace(char *bt_buf, int bt_buf_len)
 {
         unsigned int pir = mfspr(SPR_PIR);
         unsigned long *sp;
 
diff --git a/core/cpu.c b/core/cpu.c
index 8f203bb..aa046cc 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -62,7 +62,17 @@ void __attrconst *cpu_stack_bottom(unsigned int pir)
         return (void *)&cpu_stacks[pir] + sizeof(struct cpu_thread);
 }
 
-void cpu_relax(void)
+void __attrconst *cpu_stack_top(unsigned int pir)
+{
+        /* This is the top of the MC stack which is above the normal
+         * stack, which means a SP between cpu_stack_bottom() and
+         * cpu_stack_top() can either be a normal stack pointer or
+         * a Machine Check stack pointer
+         */
+        return (void *)&cpu_stacks[pir] + STACK_SIZE - STACK_TOP_GAP;
+}
+
+void __nomcount cpu_relax(void)
 {
         /* Relax a bit to give sibling threads some breathing space */
         smt_low();
@@ -74,16 +84,6 @@ void cpu_relax(void)
         smt_medium();
 }
 
-void __attrconst *cpu_stack_top(unsigned int pir)
-{
-        /* This is the top of the MC stack which is above the normal
-         * stack, which means a SP between cpu_stack_bottom() and
-         * cpu_stack_top() can either be a normal stack pointer or
-         * a Machine Check stack pointer
-         */
-        return (void *)&cpu_stacks[pir] + STACK_SIZE - STACK_TOP_GAP;
-}
-
 struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
                                 void (*func)(void *data), void *data,
                                 bool no_return)
@@ -337,6 +337,7 @@ static void init_cpu_thread(struct cpu_thread *t,
         list_head_init(&t->job_queue);
         t->state = state;
         t->pir = pir;
+        t->stack_bot_mark = LONG_MAX;
         assert(pir == container_of(t, struct cpu_stack, cpu) - cpu_stacks);
 }
 
diff --git a/core/opal.c b/core/opal.c
index 5e1c742..7e53486 100644
--- a/core/opal.c
+++ b/core/opal.c
@@ -285,6 +285,9 @@ void opal_run_pollers(void)
         /* The pollers are run locklessly, see comment in opal_del_poller */
         list_for_each(&opal_pollers, poll_ent, link)
                 poll_ent->poller(poll_ent->data);
+
+        /* On debug builds, print max stack usage */
+        check_stacks();
 }
 
 static int64_t opal_poll_events(uint64_t *outstanding_event_mask)
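For context, a minimal sketch of the per-CPU stack layout that cpu_stack_bottom()
and cpu_stack_top() above walk: each hardware thread owns one fixed-size,
self-aligned area, with its struct cpu_thread at the base and the Machine Check
stack at the top. This is an editor's illustration, not part of the patch; the
sizes, the four-entry array and the trimmed-down cpu_thread are assumptions.

/*
 * Illustrative sketch of skiboot's per-CPU stack area.
 * STACK_SIZE, STACK_TOP_GAP and struct cpu_thread are simplified
 * stand-ins, not skiboot's real definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define STACK_SIZE      0x4000          /* assumed size of one stack area */
#define STACK_TOP_GAP   0x100           /* assumed gap below the MC stack top */

struct cpu_thread {
        uint32_t pir;                   /* stand-in for the real structure */
};

/* One area per hardware thread: the cpu_thread lives at the base, the
 * normal stack grows down from below the Machine Check stack above it. */
struct cpu_stack {
        union {
                uint8_t         stack[STACK_SIZE];
                struct cpu_thread cpu;
        };
} __attribute__((aligned(STACK_SIZE)));

static struct cpu_stack cpu_stacks[4];

static void *cpu_stack_bottom(unsigned int pir)
{
        /* Usable stack starts just above the embedded cpu_thread */
        return (uint8_t *)&cpu_stacks[pir] + sizeof(struct cpu_thread);
}

static void *cpu_stack_top(unsigned int pir)
{
        /* Top of the MC stack, which sits above the normal stack */
        return (uint8_t *)&cpu_stacks[pir] + STACK_SIZE - STACK_TOP_GAP;
}

int main(void)
{
        for (unsigned int pir = 0; pir < 4; pir++)
                printf("cpu %u: stack %p .. %p\n", pir,
                       cpu_stack_bottom(pir), cpu_stack_top(pir));
        return 0;
}

Because the area is aligned to its own size, the owning cpu_thread can also be
recovered from any stack pointer by masking with ~(STACK_SIZE - 1), which is
one reason for the alignment.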
diff --git a/core/utils.c b/core/utils.c
index 71f3842..1b1a8bf 100644
--- a/core/utils.c
+++ b/core/utils.c
@@ -18,14 +18,18 @@
 #include <skiboot.h>
 #include <lock.h>
 #include <fsp.h>
+#include <cpu.h>
+#include <stack.h>
 
-void assert_fail(const char *msg)
+unsigned long __stack_chk_guard = 0xdeadf00dbaad300d;
+
+void __noreturn assert_fail(const char *msg)
 {
         prlog(PR_EMERG, "Assert fail: %s\n", msg);
         abort();
 }
 
-void abort(void)
+void __noreturn abort(void)
 {
         static bool in_abort = false;
         unsigned long hid0;
@@ -63,3 +67,85 @@ char __attrconst tohex(uint8_t nibble)
                 return '?';
         return __tohex[nibble];
 }
+
+void __noreturn __nomcount __stack_chk_fail(void);
+void __noreturn __nomcount __stack_chk_fail(void)
+{
+        prlog(PR_EMERG, "Stack corruption detected !\n");
+        abort();
+}
+
+#ifdef STACK_CHECK_ENABLED
+
+void __nomcount __mcount_stack_check(uint64_t sp, uint64_t lr);
+void __nomcount __mcount_stack_check(uint64_t sp, uint64_t lr)
+{
+        struct cpu_thread *c = this_cpu();
+        uint64_t base = (uint64_t)c;
+        uint64_t bot = base + sizeof(struct cpu_thread);
+        int64_t mark = sp - bot;
+        uint64_t top = base + NORMAL_STACK_SIZE;
+
+        /*
+         * Don't re-enter on this CPU or don't enter at all if somebody
+         * has spotted an overflow
+         */
+        if (c->in_mcount)
+                return;
+        c->in_mcount = true;
+
+        /* Capture lowest stack for this thread */
+        if (mark < c->stack_bot_mark) {
+                c->stack_bot_mark = mark;
+                c->stack_bot_pc = lr;
+                c->stack_bot_tok = c->current_token;
+        }
+
+        /* Stack is within bounds ? check for warning and bail */
+        if (sp >= (bot + STACK_SAFETY_GAP) && sp < top) {
+                if (mark < STACK_WARNING_GAP) {
+                        prlog(PR_EMERG, "CPU %04x Stack usage danger !"
+                              " pc=%08llx sp=%08llx (gap=%lld) token=%lld\n",
+                              c->pir, lr, sp, mark, c->current_token);
+                        backtrace();
+                }
+                c->in_mcount = false;
+                return;
+        }
+
+        prlog(PR_EMERG, "CPU %04x Stack overflow detected !"
+              " pc=%08llx sp=%08llx (gap=%lld) token=%lld\n",
+              c->pir, lr, sp, mark, c->current_token);
+        abort();
+}
+
+static int64_t lowest_stack_mark = LONG_MAX;
+static struct lock stack_check_lock = LOCK_UNLOCKED;
+
+void check_stacks(void)
+{
+        struct cpu_thread *c;
+        uint64_t lmark, lpc, ltok;
+        int found = -1;
+
+        for_each_cpu(c) {
+                if (!c->stack_bot_mark ||
+                    c->stack_bot_mark >= lowest_stack_mark)
+                        continue;
+                lock(&stack_check_lock);
+                if (c->stack_bot_mark >= lowest_stack_mark) {
+                        unlock(&stack_check_lock);
+                        continue;
+                }
+                lmark = lowest_stack_mark = c->stack_bot_mark;
+                lpc = c->stack_bot_pc;
+                ltok = c->stack_bot_tok;
+                found = c->pir;
+                unlock(&stack_check_lock);
+        }
+        if (found >= 0)
+                prlog(PR_NOTICE, "CPU %04x lowest stack mark %lld bytes left"
+                      " pc=%08llx token=%lld\n", found, lmark, lpc, ltok);
+}
+
+#endif /* STACK_CHECK_ENABLED */
--
cgit v1.1
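To illustrate what STACK_CHECK=1 enables: building with -pg makes gcc call the
mcount hook from every function prologue, and a small assembly stub (it lives
outside this core-only view of the commit) hands the current stack pointer and
link register to __mcount_stack_check() above, which keeps a per-thread
low-water mark that opal_run_pollers() later reports through check_stacks().
Below is a standalone, single-threaded sketch of the same bookkeeping, an
editor's illustration rather than the patch's code; the watched region, the
warning threshold and the explicit stack_check() calls are assumptions made
for the demo.

/*
 * Sketch of mcount-style stack low-water-mark tracking.
 * Assumes a downward-growing stack and uses the gcc builtin
 * __builtin_frame_address() in place of the real asm stub.
 */
#include <stdint.h>
#include <stdio.h>

#define STACK_WARNING_GAP       1024    /* assumed warning threshold */

static uintptr_t stack_bot;             /* bottom of the watched region */
static intptr_t lowest_mark = INTPTR_MAX;

/* What the mcount hook does: measure the gap left below the SP */
static void stack_check(void)
{
        uintptr_t sp = (uintptr_t)__builtin_frame_address(0);
        intptr_t mark = sp - stack_bot; /* bytes of stack still free */

        if (mark < lowest_mark)
                lowest_mark = mark;     /* record the deepest point seen */
        if (mark < STACK_WARNING_GAP)
                fprintf(stderr, "stack usage danger: %ld bytes left\n",
                        (long)mark);
}

static void recurse(int depth)
{
        volatile char pad[512];         /* burn some stack per level */

        pad[0] = (char)depth;
        stack_check();                  /* -pg would insert this for us */
        if (depth > 0)
                recurse(depth - 1);
}

int main(void)
{
        /* Pretend the watched region ends 64kB below the current frame */
        stack_bot = (uintptr_t)__builtin_frame_address(0) - 0x10000;
        recurse(100);
        printf("lowest stack mark: %ld bytes left\n", (long)lowest_mark);
        return 0;
}

The real hook must also guard against re-entry via c->in_mcount, because
prlog() and backtrace() are themselves instrumented with -pg; the sketch
sidesteps that by calling the check explicitly.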