author     Nicholas Piggin <npiggin@gmail.com>	2017-05-22 15:53:01 +1000
committer  Stewart Smith <stewart@linux.vnet.ibm.com>	2017-06-06 20:49:06 +1000
commit     db9c1422002c1333fd09177d32edb8c2003fb4ea
tree       5f6e4186e87b1c7927a1d9d915e59529c3929c19 /core
parent     38b0c8454b56a74fe785f0db1d218afa8f6ea478
Improve cpu_idle when PM is disabled
Split cpu_idle() into cpu_idle_delay() and cpu_idle_job() rather than
requesting the idle type as a function argument. Have those functions
provide a default polling (non-PM) implementation which spins at the
lowest SMT priority. This moves all the decrementer delay code into the
CPU idle code rather than the caller.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
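For reference, a minimal sketch of the caller-side change, based on the init.c
and timebase.c hunks below. The wait_for_* wrappers are hypothetical and only
illustrate how cpu_idle_job() and cpu_idle_delay() replace the old
cpu_idle(wake_on) calls; cpu_check_jobs(), cpu_process_jobs() and usecs_to_tb()
are existing skiboot helpers used by the updated callers.

	/* Hypothetical wrapper: replaces cpu_idle(cpu_wake_on_job).
	 * Poll (or nap, when PM is enabled) until a job may be pending,
	 * then run the job queue. */
	static void wait_for_job(struct cpu_thread *cpu)
	{
		while (!cpu_check_jobs(cpu))
			cpu_idle_job();
		cpu_process_jobs();
	}

	/* Hypothetical wrapper: replaces the open-coded
	 * mtspr(SPR_DEC, ...) / cpu_idle(cpu_wake_on_dec) loop.
	 * The idle code now clamps and programs the decrementer itself;
	 * min_pm is the shortest wait worth entering a PM state for,
	 * shorter waits spin at smt_lowest(). */
	static void wait_for_timeout(unsigned long duration)
	{
		unsigned long min_pm = usecs_to_tb(10);

		cpu_idle_delay(duration, min_pm);
	}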
Diffstat (limited to 'core')
-rw-r--r--	core/cpu.c	60
-rw-r--r--	core/init.c	4
-rw-r--r--	core/timebase.c	28
3 files changed, 59 insertions(+), 33 deletions(-)
diff --git a/core/cpu.c b/core/cpu.c
index a03cf91..c4a4d29 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -286,14 +286,10 @@ void cpu_process_jobs(void)
unlock(&cpu->job_lock);
}
-static void cpu_idle_default(enum cpu_wake_cause wake_on __unused)
-{
- /* Maybe do something better for simulators ? */
- cpu_relax();
- cpu_relax();
- cpu_relax();
- cpu_relax();
-}
+enum cpu_wake_cause {
+ cpu_wake_on_job,
+ cpu_wake_on_dec,
+};
static void cpu_idle_p8(enum cpu_wake_cause wake_on)
{
@@ -301,7 +297,7 @@ static void cpu_idle_p8(enum cpu_wake_cause wake_on)
struct cpu_thread *cpu = this_cpu();
if (!pm_enabled) {
- cpu_idle_default(wake_on);
+ prlog_once(PR_DEBUG, "cpu_idle_p8 called pm disabled\n");
return;
}
@@ -373,18 +369,60 @@ void cpu_set_pm_enable(bool enabled)
}
}
-void cpu_idle(enum cpu_wake_cause wake_on)
+static void cpu_idle_pm(enum cpu_wake_cause wake_on)
{
switch(proc_gen) {
case proc_gen_p8:
cpu_idle_p8(wake_on);
break;
default:
- cpu_idle_default(wake_on);
+ prlog_once(PR_DEBUG, "cpu_idle_pm called with bad processor type\n");
break;
}
}
+void cpu_idle_job(void)
+{
+ if (pm_enabled) {
+ cpu_idle_pm(cpu_wake_on_job);
+ } else {
+ struct cpu_thread *cpu = this_cpu();
+
+ smt_lowest();
+ /* Check for jobs again */
+ while (!cpu_check_jobs(cpu))
+ barrier();
+ smt_medium();
+ }
+}
+
+void cpu_idle_delay(unsigned long delay, unsigned long min_pm)
+{
+ unsigned long now = mftb();
+ unsigned long end = now + delay;
+
+ if (pm_enabled && delay > min_pm) {
+ for (;;) {
+ if (delay >= 0x7fffffff)
+ delay = 0x7fffffff;
+ mtspr(SPR_DEC, delay);
+
+ cpu_idle_pm(cpu_wake_on_dec);
+
+ now = mftb();
+ if (tb_compare(now, end) == TB_AAFTERB)
+ break;
+
+ delay = end - now;
+ }
+ } else {
+ smt_lowest();
+ while (tb_compare(mftb(), end) != TB_AAFTERB)
+ barrier();
+ smt_medium();
+ }
+}
+
void cpu_process_local_jobs(void)
{
struct cpu_thread *cpu = first_available_cpu();
diff --git a/core/init.c b/core/init.c
index dce10fd..8bd737a 100644
--- a/core/init.c
+++ b/core/init.c
@@ -1068,9 +1068,9 @@ void __noreturn __secondary_cpu_entry(void)
/* Wait for work to do */
while(true) {
if (cpu_check_jobs(cpu))
- cpu_process_jobs();
+ cpu_process_jobs();
else
- cpu_idle(cpu_wake_on_job);
+ cpu_idle_job();
}
}
diff --git a/core/timebase.c b/core/timebase.c
index a3c0fec..ca961c3 100644
--- a/core/timebase.c
+++ b/core/timebase.c
@@ -24,8 +24,8 @@ unsigned long tb_hz = 512000000;
static void time_wait_poll(unsigned long duration)
{
- unsigned long remaining = duration;
- unsigned long end = mftb() + duration;
+ unsigned long now = mftb();
+ unsigned long end = now + duration;
unsigned long period = msecs_to_tb(5);
if (this_cpu()->tb_invalid) {
@@ -33,7 +33,9 @@ static void time_wait_poll(unsigned long duration)
return;
}
- while (tb_compare(mftb(), end) != TB_AAFTERB) {
+ while (tb_compare(now, end) != TB_AAFTERB) {
+ unsigned long remaining = end - now;
+
/* Call pollers periodically but not continually to avoid
* bouncing cachelines due to lock contention. */
if (remaining >= period) {
@@ -43,7 +45,7 @@ static void time_wait_poll(unsigned long duration)
} else
time_wait_nopoll(remaining);
- cpu_relax();
+ now = mftb();
}
}
@@ -64,28 +66,14 @@ void time_wait(unsigned long duration)
void time_wait_nopoll(unsigned long duration)
{
- unsigned long end = mftb() + duration;
- unsigned long min = usecs_to_tb(10);
+ unsigned long min_sleep = usecs_to_tb(10);
if (this_cpu()->tb_invalid) {
cpu_relax();
return;
}
- for (;;) {
- uint64_t delay, tb = mftb();
-
- if (tb_compare(tb, end) == TB_AAFTERB)
- break;
- delay = end - tb;
- if (delay >= 0x7fffffff)
- delay = 0x7fffffff;
- if (delay >= min) {
- mtspr(SPR_DEC, delay);
- cpu_idle(cpu_wake_on_dec);
- } else
- cpu_relax();
- }
+ cpu_idle_delay(duration, min_sleep);
}
void time_wait_ms(unsigned long ms)