diff options
author | Nicholas Piggin <npiggin@gmail.com> | 2018-04-08 16:49:37 +1000 |
---|---|---|
committer | Stewart Smith <stewart@linux.ibm.com> | 2018-04-18 20:23:07 -0500 |
commit | 8514e4dc9a82f3ff85d40138f2c8e8a1dc64efa4 (patch) | |
tree | bfa96163d153d55f4d546256096c74a46a84760d /asm | |
parent | ad0941960bd045644f6834d6e711bedbde3c29c8 (diff) | |
download | skiboot-8514e4dc9a82f3ff85d40138f2c8e8a1dc64efa4.zip skiboot-8514e4dc9a82f3ff85d40138f2c8e8a1dc64efa4.tar.gz skiboot-8514e4dc9a82f3ff85d40138f2c8e8a1dc64efa4.tar.bz2 |
asm/head: implement quiescing without stack or clobbering regs
Quiescing currently is implemented in C in opal_entry before the
opal call handler is called. This works well enough for simple
cases like fast reset when one CPU wants all others out of the way.
Linux would like to use it to prevent an sreset IPI from
interrupting firmware, which could lead to deadlocks when crash
dumping or entering the debugger. Linux interrupts do not recover
well when returning back to general OPAL code, due to r13 not being
restored. OPAL also can't be re-entered, which may happen e.g.,
from the debugger.
So move the quiesce hold/reject to entry code, before the stack or
r1 or r13 registers are switched. OPAL can be interrupted and
returned to or re-entered during this period.
This does not completely solve all such problems. OPAL will be
interrupted with sreset if the quiesce times out, and it can be
interrupted by MCEs as well. These still have the issues above.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Stewart Smith <stewart@linux.ibm.com>
Diffstat (limited to 'asm')
-rw-r--r-- | asm/asm-offsets.c | 2 | ||||
-rw-r--r-- | asm/head.S | 72 |
2 files changed, 68 insertions, 6 deletions
diff --git a/asm/asm-offsets.c b/asm/asm-offsets.c index 7119950..3eac592 100644 --- a/asm/asm-offsets.c +++ b/asm/asm-offsets.c @@ -37,6 +37,8 @@ int main(void) OFFSET(CPUTHREAD_PIR, cpu_thread, pir); OFFSET(CPUTHREAD_SAVE_R1, cpu_thread, save_r1); OFFSET(CPUTHREAD_STATE, cpu_thread, state); + OFFSET(CPUTHREAD_IN_OPAL_CALL, cpu_thread, in_opal_call); + OFFSET(CPUTHREAD_QUIESCE_OPAL_CALL, cpu_thread, quiesce_opal_call); OFFSET(CPUTHREAD_CUR_TOKEN, cpu_thread, current_token); DEFINE(CPUTHREAD_GAP, sizeof(struct cpu_thread) + STACK_SAFETY_GAP); #ifdef STACK_CHECK_ENABLED @@ -955,14 +955,64 @@ opal_boot_trampoline: * r0: Token * r2: OPAL Base * r3..r10: Args - * r12: Scratch + * r11..r12: Scratch * r13..r31: Preserved - * */ .balign 0x10 .global opal_entry opal_entry: - /* Get our per CPU stack */ + /* Get our per CPU pointer in r12 to check for quiesce */ + mfspr %r12,SPR_PIR + GET_STACK(%r12,%r12) + + /* Get CPU thread */ + clrrdi %r12,%r12,STACK_SHIFT + + /* + * OPAL entry must first increment in_opal_call, then check + * for quiesce, without touching the stack or clobbering + * registers other than r11 and r12 and cr0. In this way, OPAL + * is tolerant of re-entry on this same CPU while it is spinning + * for quiesce. + * + * Sequence goes: + * in_opal_call++; + * sync; + * if (quiesce_opal_call) { + * in_opal_call--; + * reject-or-spin-then-retry; + */ +1: lwz %r11,CPUTHREAD_IN_OPAL_CALL(%r12) + addi %r11,%r11,1 + stw %r11,CPUTHREAD_IN_OPAL_CALL(%r12) + /* + * Order the store in_opal_call vs load quiesce_opal_call. + * This also provides an acquire barrier for opal entry vs + * another thread quiescing opal. In this way, quiescing + * can behave as mutual exclusion. 
+ */ + sync + lwz %r11,CPUTHREAD_QUIESCE_OPAL_CALL(%r12) + cmpwi %cr0,%r11,0 + beq+ 4f + /* We are quiescing, hold or reject */ + cmpwi %cr0,%r11,QUIESCE_REJECT + bne 2f + li %r3,OPAL_BUSY + b .Lreturn /* reject */ +2: /* hold */ + lwz %r11,CPUTHREAD_IN_OPAL_CALL(%r12) + subi %r11,%r11,1 + stw %r11,CPUTHREAD_IN_OPAL_CALL(%r12) + smt_lowest +3: lwz %r11,CPUTHREAD_QUIESCE_OPAL_CALL(%r12) + cmpwi %cr0,%r11,QUIESCE_HOLD + beq 3b + /* spin finished, try again */ + smt_medium + b 1b + +4: /* Quiesce protocol done, get our per CPU stack */ mfspr %r12,SPR_PIR GET_STACK(%r12,%r12) stdu %r12,-STACK_FRAMESIZE(%r12) @@ -1006,7 +1056,7 @@ opal_entry: mr %r3,%r1 bl opal_entry_check cmpdi %r3,0 - bne 1f + bne .Lreturn ld %r0,STACK_GPR0(%r1) ld %r3,STACK_GPR3(%r1) @@ -1031,12 +1081,22 @@ opal_entry: bctrl mr %r4,%r1 - bl opal_exit_check + bl opal_exit_check /* r3 is preserved */ -1: ld %r12,STACK_LR(%r1) + /* + * Restore r1 and r13 before decrementing in_opal_call. + * Move per-cpu pointer to volatile r12, restore lr, r1, r13. + */ +.Lreturn: + ld %r12,STACK_LR(%r1) mtlr %r12 + mr %r12,%r13 ld %r13,STACK_GPR13(%r1) ld %r1,STACK_GPR1(%r1) + sync /* release barrier vs quiescing */ + lwz %r11,CPUTHREAD_IN_OPAL_CALL(%r12) + subi %r11,%r11,1 + stw %r11,CPUTHREAD_IN_OPAL_CALL(%r12) blr .global start_kernel |