path: root/core/cpu.c
author     Stewart Smith <stewart@linux.vnet.ibm.com>    2015-05-06 14:00:52 +1000
committer  Stewart Smith <stewart@linux.vnet.ibm.com>    2015-05-07 17:54:30 +1000
commit     0a3e75d5988c334197d4cb27d92acb901605019a (patch)
tree       28e2da85cb532958da5ca62dbee8176109f11c04    /core/cpu.c
parent     00f469a5f9231c50d478407ae7d42fcbad942fc0 (diff)
Add global CPU job queue
When we have multiple systems trying to start concurrent jobs on
different CPUs, they typically pick the first available (operating)
CPU to schedule the job on. This works fine when there's only one set
of jobs or when we want to bind jobs to specific CPUs.

When we have jobs such as asynchronously loading LIDs and scanning
PHBs, we don't care which CPUs they run on; we care more that they are
not scheduled on CPUs that already have existing tasks.

This patch adds a global queue of jobs which secondary CPUs will look
at for work (if idle). This leads to simplified callers, which just
need to queue jobs to NULL (no specific CPU) and then call a magic
function that will run the CPU job queue if we don't have secondary
CPUs.

Additionally, we add a const char *name to cpu_job, just to aid with
debugging.

Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
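A rough usage sketch of the interface described above (illustrative
only, not part of the patch): cpu_queue_job(), cpu_wait_job() and
cpu_process_local_jobs() are the functions touched in the diff below,
while load_lid_async(), example_start_async_load() and lid_ctx are
hypothetical placeholders, and exactly where a real caller would drain
the queue is likewise an assumption.

#include <skiboot.h>
#include <cpu.h>

/* Hypothetical worker; matches the void (*func)(void *data) job type. */
static void load_lid_async(void *data)
{
	/* ... do the slow work here ... */
	(void)data;
}

static void example_start_async_load(void *lid_ctx)
{
	struct cpu_job *job;

	/* NULL cpu: the job lands on the new global queue and any idle
	 * secondary CPU may pick it up. */
	job = cpu_queue_job(NULL, "load_lid", load_lid_async, lid_ctx);
	if (!job)
		return;

	/* On systems with no usable secondary CPUs, this is the "magic
	 * function" from the commit message: it runs the queue
	 * synchronously so the job is not stranded. */
	cpu_process_local_jobs();

	/* Wait for completion and free the job. */
	cpu_wait_job(job, true);
}

The point of passing NULL is that the caller no longer has to pick a
victim CPU; on a single-CPU system, cpu_process_local_jobs() is what
keeps the queued job from waiting forever.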
Diffstat (limited to 'core/cpu.c')
-rw-r--r--   core/cpu.c   65
1 file changed, 55 insertions(+), 10 deletions(-)
diff --git a/core/cpu.c b/core/cpu.c
index 262316c..9eb4240 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -54,10 +54,14 @@ struct cpu_job {
struct list_node link;
void (*func)(void *data);
void *data;
+ const char *name;
bool complete;
bool no_return;
};
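+/*
+ * Jobs queued with no target CPU (cpu == NULL) land on this list and are
+ * picked up by whichever idle thread runs cpu_process_jobs() next.
+ */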
+static struct lock global_job_queue_lock = LOCK_UNLOCKED;
+static struct list_head global_job_queue;
+
/* attribute const as cpu_stacks is constant. */
unsigned long __attrconst cpu_stack_bottom(unsigned int pir)
{
@@ -89,12 +93,13 @@ void __nomcount cpu_relax(void)
}
struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
+ const char *name,
void (*func)(void *data), void *data,
bool no_return)
{
struct cpu_job *job;
- if (!cpu_is_available(cpu)) {
+ if (cpu && !cpu_is_available(cpu)) {
prerror("CPU: Tried to queue job on unavailable CPU 0x%04x\n",
cpu->pir);
return NULL;
@@ -105,10 +110,15 @@ struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
return NULL;
job->func = func;
job->data = data;
+ job->name = name;
job->complete = false;
job->no_return = no_return;
- if (cpu != this_cpu()) {
+ if (cpu == NULL) {
+ lock(&global_job_queue_lock);
+ list_add_tail(&global_job_queue, &job->link);
+ unlock(&global_job_queue_lock);
+ } else if (cpu != this_cpu()) {
lock(&cpu->job_lock);
list_add_tail(&cpu->job_queue, &job->link);
unlock(&cpu->job_lock);
@@ -158,30 +168,40 @@ void cpu_free_job(struct cpu_job *job)
void cpu_process_jobs(void)
{
struct cpu_thread *cpu = this_cpu();
- struct cpu_job *job;
+ struct cpu_job *job = NULL;
void (*func)(void *);
void *data;
sync();
- if (list_empty(&cpu->job_queue))
+ if (list_empty(&cpu->job_queue) && list_empty(&global_job_queue))
return;
lock(&cpu->job_lock);
while (true) {
bool no_return;
- if (list_empty(&cpu->job_queue))
- break;
- smt_medium();
- job = list_pop(&cpu->job_queue, struct cpu_job, link);
+ if (list_empty(&cpu->job_queue)) {
+ smt_medium();
+ if (list_empty(&global_job_queue))
+ break;
+ lock(&global_job_queue_lock);
+ job = list_pop(&global_job_queue, struct cpu_job, link);
+ unlock(&global_job_queue_lock);
+ } else {
+ smt_medium();
+ job = list_pop(&cpu->job_queue, struct cpu_job, link);
+ }
+
if (!job)
break;
+
func = job->func;
data = job->data;
no_return = job->no_return;
unlock(&cpu->job_lock);
+ prlog(PR_TRACE, "running job %s on %x\n", job->name, cpu->pir);
if (no_return)
free(job);
func(data);
lock(&cpu->job_lock);
if (!no_return) {
@@ -192,6 +212,27 @@ void cpu_process_jobs(void)
unlock(&cpu->job_lock);
}
+void cpu_process_local_jobs(void)
+{
+ struct cpu_thread *cpu = first_available_cpu();
+
+ while (cpu) {
+ if (cpu != this_cpu())
+ break;
+ cpu = next_available_cpu(cpu);
+ }
+
+ if (!cpu)
+ cpu = first_available_cpu();
+
+ /* No other CPU to run on, just run synchronously */
+ if (cpu == this_cpu()) {
+ printf("Processing jobs synchronously\n");
+ cpu_process_jobs();
+ }
+}
+
+
struct dt_node *get_cpu_node(u32 pir)
{
struct cpu_thread *t = find_cpu_by_pir(pir);
@@ -413,6 +454,8 @@ void init_boot_cpu(void)
init_cpu_thread(boot_cpu, cpu_state_active, pir);
init_boot_tracebuf(boot_cpu);
assert(this_cpu() == boot_cpu);
+
+ list_head_init(&global_job_queue);
}
void init_all_cpus(void)
@@ -568,7 +611,8 @@ static int64_t opal_start_cpu_thread(uint64_t server_no, uint64_t start_address)
prerror("OPAL: CPU not active in OPAL !\n");
return OPAL_WRONG_STATE;
}
- job = __cpu_queue_job(cpu, opal_start_thread_job, (void *)start_address,
+ job = __cpu_queue_job(cpu, "start_thread",
+ opal_start_thread_job, (void *)start_address,
true);
unlock(&reinit_lock);
if (!job) {
@@ -648,7 +692,8 @@ static int64_t cpu_change_all_hile(bool hile)
cpu_change_hile(&hile);
continue;
}
- cpu_wait_job(cpu_queue_job(cpu, cpu_change_hile, &hile), true);
+ cpu_wait_job(cpu_queue_job(cpu, "cpu_change_hile",
+ cpu_change_hile, &hile), true);
}
return OPAL_SUCCESS;
}
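
And the consumer side, again as an illustrative sketch under the same
assumptions rather than skiboot's actual secondary-thread code:
cpu_process_jobs() and cpu_relax() are real functions from the diff
above, while example_secondary_idle_loop() is a hypothetical stand-in
for an idle loop. The key point is that an idle thread now drains the
shared global_job_queue as well as its own per-CPU job_queue.

#include <cpu.h>

static void example_secondary_idle_loop(void)
{
	for (;;) {
		/* Runs jobs from this thread's own job_queue first and
		 * falls back to popping work off global_job_queue when
		 * the local queue is empty. */
		cpu_process_jobs();

		/* Nothing to do right now; back off politely. */
		cpu_relax();
	}
}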