cgit commit view (skiboot)

author:    Nicholas Piggin <npiggin@gmail.com>          2017-11-29 15:36:42 +1000
committer: Stewart Smith <stewart@linux.vnet.ibm.com>   2017-12-03 21:49:12 -0600
commit:    f84a8a1c749d8ee7b5e91d5a04a98c6332c182b1 (patch)
tree:      415c2806cec79189632d660815df3f8ac105c5e1
parent:    ff230a3e9cdfd493c85de33407ae03875af80d2f (diff)
download:  skiboot-f84a8a1c749d8ee7b5e91d5a04a98c6332c182b1.zip
           skiboot-f84a8a1c749d8ee7b5e91d5a04a98c6332c182b1.tar.gz
           skiboot-f84a8a1c749d8ee7b5e91d5a04a98c6332c182b1.tar.bz2
fast-reboot: clean up some common cpu iteration processes with macros
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
Diffstat:
 core/cpu.c         | 28
 core/fast-reboot.c | 57
 include/cpu.h      | 10
3 files changed, 49 insertions, 46 deletions
@@ -709,6 +709,34 @@ struct cpu_thread *first_present_cpu(void)
 	return next_present_cpu(NULL);
 }
 
+struct cpu_thread *next_ungarded_cpu(struct cpu_thread *cpu)
+{
+	do {
+		cpu = next_cpu(cpu);
+	} while(cpu && cpu->state == cpu_state_unavailable);
+
+	return cpu;
+}
+
+struct cpu_thread *first_ungarded_cpu(void)
+{
+	return next_ungarded_cpu(NULL);
+}
+
+struct cpu_thread *next_ungarded_primary(struct cpu_thread *cpu)
+{
+	do {
+		cpu = next_cpu(cpu);
+	} while(cpu && cpu->state == cpu_state_unavailable && cpu->primary != cpu);
+
+	return cpu;
+}
+
+struct cpu_thread *first_ungarded_primary(void)
+{
+	return next_ungarded_primary(NULL);
+}
+
 u8 get_available_nr_cores_in_chip(u32 chip_id)
 {
 	struct cpu_thread *core;
diff --git a/core/fast-reboot.c b/core/fast-reboot.c
index aad84ad..412639a 100644
--- a/core/fast-reboot.c
+++ b/core/fast-reboot.c
@@ -233,24 +233,15 @@ static int sreset_all_prepare(void)
 		this_cpu()->pir, pir_to_core_id(this_cpu()->pir));
 
 	/* Assert special wakup on all cores. Only on operational cores. */
-	for_each_cpu(cpu) {
-		/* GARDed CPUs are marked unavailable. Skip them. */
-		if (cpu->state == cpu_state_unavailable)
-			continue;
-
-		if (cpu->primary == cpu)
-			if (set_special_wakeup(cpu) != OPAL_SUCCESS)
-				return false;
+	for_each_ungarded_primary(cpu) {
+		if (set_special_wakeup(cpu) != OPAL_SUCCESS)
+			return false;
 	}
 
 	prlog(PR_DEBUG, "RESET: Stopping the world...\n");
 
 	/* Put everybody in stop except myself */
-	for_each_cpu(cpu) {
-		/* GARDed CPUs are marked unavailable. Skip them. */
-		if (cpu->state == cpu_state_unavailable)
-			continue;
-
+	for_each_ungarded_cpu(cpu) {
 		if (cpu != this_cpu())
 			set_direct_ctl(cpu, P8_DIRECT_CTL_STOP);
 	}
@@ -262,14 +253,8 @@ static void sreset_all_finish(void)
 {
 	struct cpu_thread *cpu;
 
-	for_each_cpu(cpu) {
-		/* GARDed CPUs are marked unavailable. Skip them. */
-		if (cpu->state == cpu_state_unavailable)
-			continue;
-
-		if (cpu->primary == cpu)
-			clr_special_wakeup(cpu);
-	}
+	for_each_ungarded_primary(cpu)
+		clr_special_wakeup(cpu);
 }
 
 static void sreset_all_others(void)
@@ -279,11 +264,7 @@ static void sreset_all_others(void)
 	prlog(PR_DEBUG, "RESET: Pre-napping all threads but one...\n");
 
 	/* Put everybody in pre-nap except myself */
-	for_each_cpu(cpu) {
-		/* GARDed CPUs are marked unavailable. Skip them. */
-		if (cpu->state == cpu_state_unavailable)
-			continue;
-
+	for_each_ungarded_cpu(cpu) {
 		if (cpu != this_cpu())
 			set_direct_ctl(cpu, P8_DIRECT_CTL_PRENAP);
 	}
@@ -291,11 +272,7 @@ static void sreset_all_others(void)
 	prlog(PR_DEBUG, "RESET: Resetting all threads but one...\n");
 
 	/* Reset everybody except my own core threads */
-	for_each_cpu(cpu) {
-		/* GARDed CPUs are marked unavailable. Skip them. */
-		if (cpu->state == cpu_state_unavailable)
-			continue;
-
+	for_each_ungarded_cpu(cpu) {
 		if (cpu != this_cpu())
 			set_direct_ctl(cpu, P8_DIRECT_CTL_SRESET);
 	}
@@ -309,11 +286,7 @@ static bool fast_reset_p8(void)
 		return false;
 
 	/* Put everybody in stop except myself */
-	for_each_cpu(cpu) {
-		/* GARDed CPUs are marked unavailable. Skip them. */
-		if (cpu->state == cpu_state_unavailable)
-			continue;
-
+	for_each_ungarded_cpu(cpu) {
 		/* Also make sure that saved_r1 is 0 ! That's what will
 		 * make our reset vector jump to fast_reboot_entry
 		 */
@@ -522,14 +495,10 @@ void __noreturn fast_reboot_entry(void)
 	/* We are the original boot CPU, wait for secondaries to
 	 * be captured.
 	 */
-	for_each_cpu(cpu) {
+	for_each_ungarded_cpu(cpu) {
 		if (cpu == this_cpu())
 			continue;
 
-		/* GARDed CPUs are marked unavailable. Skip them. */
-		if (cpu->state == cpu_state_unavailable)
-			continue;
-
 		/* XXX Add a callin timeout ? */
 		while (cpu->state != cpu_state_present) {
 			smt_lowest();
@@ -545,14 +514,10 @@ void __noreturn fast_reboot_entry(void)
 	sync();
 
 	/* Wait for them to respond */
-	for_each_cpu(cpu) {
+	for_each_ungarded_cpu(cpu) {
 		if (cpu == this_cpu())
 			continue;
 
-		/* GARDed CPUs are marked unavailable. Skip them. */
-		if (cpu->state == cpu_state_unavailable)
-			continue;
-
 		/* XXX Add a callin timeout ? */
 		while (cpu->state == cpu_state_present) {
 			smt_lowest();
diff --git a/include/cpu.h b/include/cpu.h
index 5db4ccb..3d5dbd4 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -190,6 +190,10 @@ extern struct cpu_thread *first_available_cpu(void);
 extern struct cpu_thread *next_available_cpu(struct cpu_thread *cpu);
 extern struct cpu_thread *first_present_cpu(void);
 extern struct cpu_thread *next_present_cpu(struct cpu_thread *cpu);
+extern struct cpu_thread *first_ungarded_cpu(void);
+extern struct cpu_thread *next_ungarded_cpu(struct cpu_thread *cpu);
+extern struct cpu_thread *first_ungarded_primary(void);
+extern struct cpu_thread *next_ungarded_primary(struct cpu_thread *cpu);
 
 #define for_each_cpu(cpu)	\
 	for (cpu = first_cpu(); cpu; cpu = next_cpu(cpu))
@@ -200,6 +204,12 @@ extern struct cpu_thread *next_present_cpu(struct cpu_thread *cpu);
 #define for_each_present_cpu(cpu)	\
 	for (cpu = first_present_cpu(); cpu; cpu = next_present_cpu(cpu))
 
+#define for_each_ungarded_cpu(cpu)	\
+	for (cpu = first_ungarded_cpu(); cpu; cpu = next_ungarded_cpu(cpu))
+
+#define for_each_ungarded_primary(cpu)	\
+	for (cpu = first_ungarded_primary(); cpu; cpu = next_ungarded_primary(cpu))
+
 extern struct cpu_thread *first_available_core_in_chip(u32 chip_id);
 extern struct cpu_thread *next_available_core_in_chip(struct cpu_thread *cpu, u32 chip_id);
 extern u8 get_available_nr_cores_in_chip(u32 chip_id);