about summary refs log tree commit diff
path: root/core/fast-reboot.c
diff options
context:
space:
mode:
authorNicholas Piggin <npiggin@gmail.com>2017-11-29 15:36:41 +1000
committerStewart Smith <stewart@linux.vnet.ibm.com>2017-12-03 21:49:12 -0600
commitff230a3e9cdfd493c85de33407ae03875af80d2f (patch)
treeea11bee0185b9b4f2faa0483a0d8e35e65fb29c6 /core/fast-reboot.c
parent4d9f29cab33bcaa5bae8110be8a66e665436b6bc (diff)
downloadskiboot-ff230a3e9cdfd493c85de33407ae03875af80d2f.zip
skiboot-ff230a3e9cdfd493c85de33407ae03875af80d2f.tar.gz
skiboot-ff230a3e9cdfd493c85de33407ae03875af80d2f.tar.bz2
fast-reboot: remove last man standing logic
The "last man standing" logic has the initiator CPU sreset all others, then one of them sresets the initiator. This complicates the fast reboot process and increases potential for errors. The initiator can simply branch to 0x100 directly. Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
Diffstat (limited to 'core/fast-reboot.c')
-rw-r--r-- core/fast-reboot.c | 23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/core/fast-reboot.c b/core/fast-reboot.c
index 62c7216..aad84ad 100644
--- a/core/fast-reboot.c
+++ b/core/fast-reboot.c
@@ -37,7 +37,6 @@
/* Flag tested by the OPAL entry code */
uint8_t reboot_in_progress;
static volatile bool fast_boot_release;
-static struct cpu_thread *last_man_standing;
static struct lock reset_lock = LOCK_UNLOCKED;
static int set_special_wakeup(struct cpu_thread *cpu)
@@ -302,19 +301,10 @@ static void sreset_all_others(void)
}
}
-static void sreset_cpu(struct cpu_thread *cpu)
-{
- set_direct_ctl(cpu, P8_DIRECT_CTL_PRENAP);
- set_direct_ctl(cpu, P8_DIRECT_CTL_SRESET);
-}
-
static bool fast_reset_p8(void)
{
struct cpu_thread *cpu;
- /* Mark ourselves as last man standing in need of a reset */
- last_man_standing = this_cpu();
-
if (!sreset_all_prepare())
return false;
@@ -402,10 +392,7 @@ void fast_reboot(void)
unlock(&reset_lock);
if (success) {
- if (!next_cpu(first_cpu()))
- /* Only 1 CPU, so fake reset ourselves */
- asm volatile("ba 0x100 " : : : );
- /* Don't return */
+ asm volatile("ba 0x100\n\t" : : : "memory");
for (;;)
;
}
@@ -505,14 +492,6 @@ void __noreturn fast_reboot_entry(void)
prlog(PR_DEBUG, "RESET: CPU 0x%04x reset in\n", this_cpu()->pir);
time_wait_ms(100);
- lock(&reset_lock);
- if (last_man_standing && next_cpu(first_cpu())) {
- prlog(PR_DEBUG, "RESET: last man standing fixup...\n");
- sreset_cpu(last_man_standing);
- }
- last_man_standing = NULL;
- unlock(&reset_lock);
-
/* We reset our ICP first ! Otherwise we might get stray interrupts
* when unsplitting
*/