aboutsummaryrefslogtreecommitdiff
path: root/job.c
diff options
context:
space:
mode:
authorEmanuele Giuseppe Esposito <eesposit@redhat.com>2022-09-26 05:31:54 -0400
committerKevin Wolf <kwolf@redhat.com>2022-10-07 12:11:41 +0200
commit55c5a25a0363f153d8875a60001342eb6fe6e4f5 (patch)
tree36e046ad659f66fc2fa588aa6d44dae31a296838 /job.c
parent2ffc10d53b6ab57ab228359709f8703b0b010430 (diff)
downloadqemu-55c5a25a0363f153d8875a60001342eb6fe6e4f5.zip
qemu-55c5a25a0363f153d8875a60001342eb6fe6e4f5.tar.gz
qemu-55c5a25a0363f153d8875a60001342eb6fe6e4f5.tar.bz2
job.c: make job_mutex and job_lock/unlock() public
job mutex will be used to protect the job struct elements and list, replacing AioContext locks.

Right now use a shared lock for all jobs, in order to keep things simple. Once the AioContext lock is gone, we can introduce per-job locks.

To simplify the switch from aiocontext to job lock, introduce *nop* lock/unlock functions and macros. We want to always call job_lock/unlock outside the AioContext locks, and not vice-versa, otherwise we might get a deadlock. This is not straightforward to do, and that's why we start with nop functions. Once everything is protected by job_lock/unlock, we can change the nop into an actual mutex and remove the aiocontext lock.

Since job_mutex is already being used, add static real_job_{lock/unlock} for the existing usage.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Message-Id: <20220926093214.506243-2-eesposit@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Diffstat (limited to 'job.c')
-rw-r--r--  job.c  35
1 files changed, 23 insertions, 12 deletions
diff --git a/job.c b/job.c
index 20f0d8b..7365db8 100644
--- a/job.c
+++ b/job.c
@@ -32,6 +32,12 @@
#include "trace/trace-root.h"
#include "qapi/qapi-events-job.h"
+/*
+ * job_mutex protects the jobs list, but also makes the
+ * struct job fields thread-safe.
+ */
+QemuMutex job_mutex;
+
static QLIST_HEAD(, Job) jobs = QLIST_HEAD_INITIALIZER(jobs);
/* Job State Transition Table */
@@ -74,17 +80,22 @@ struct JobTxn {
int refcnt;
};
-/* Right now, this mutex is only needed to synchronize accesses to job->busy
- * and job->sleep_timer, such as concurrent calls to job_do_yield and
- * job_enter. */
-static QemuMutex job_mutex;
+void job_lock(void)
+{
+ /* nop */
+}
+
+void job_unlock(void)
+{
+ /* nop */
+}
-static void job_lock(void)
+static void real_job_lock(void)
{
qemu_mutex_lock(&job_mutex);
}
-static void job_unlock(void)
+static void real_job_unlock(void)
{
qemu_mutex_unlock(&job_mutex);
}
@@ -450,21 +461,21 @@ void job_enter_cond(Job *job, bool(*fn)(Job *job))
return;
}
- job_lock();
+ real_job_lock();
if (job->busy) {
- job_unlock();
+ real_job_unlock();
return;
}
if (fn && !fn(job)) {
- job_unlock();
+ real_job_unlock();
return;
}
assert(!job->deferred_to_main_loop);
timer_del(&job->sleep_timer);
job->busy = true;
- job_unlock();
+ real_job_unlock();
aio_co_enter(job->aio_context, job->co);
}
@@ -481,13 +492,13 @@ void job_enter(Job *job)
* called explicitly. */
static void coroutine_fn job_do_yield(Job *job, uint64_t ns)
{
- job_lock();
+ real_job_lock();
if (ns != -1) {
timer_mod(&job->sleep_timer, ns);
}
job->busy = false;
job_event_idle(job);
- job_unlock();
+ real_job_unlock();
qemu_coroutine_yield();
/* Set by job_enter_cond() before re-entering the coroutine. */