author      Emanuele Giuseppe Esposito <eesposit@redhat.com>    2022-09-26 05:32:11 -0400
committer   Kevin Wolf <kwolf@redhat.com>                       2022-10-07 12:11:41 +0200
commit      6f592e5aca1a27fe1c1f661cfe68b35b90850acf (patch)
tree        74f0dbf7c423bb4361cf0bb5c008a8b50de7fe6e /tests
parent      2fc3bdc3843d2d8bde54c2be4d4f4cc8a9ffcf50 (diff)
job.c: enable job lock/unlock and remove AioContext locks
Change the job_{lock/unlock} functions and macros to use job_mutex.
Now that they are no longer no-ops, remove the AioContext lock to
avoid deadlocks. Therefore:

- when possible, remove the AioContext lock/unlock pair completely;
- if it is also used by some other function, reduce the locking
  section as much as possible, leaving the job API outside;
- change AIO_WAIT_WHILE into AIO_WAIT_WHILE_UNLOCKED, since we are
  not using the AioContext lock anymore.

The only functions that still need the AioContext lock are:

- the JobDriver callbacks, already documented in job.h;
- job_cancel_sync() in replication.c, which is called with the
  AioContext lock taken; but now the job layer uses
  AIO_WAIT_WHILE_UNLOCKED, so we need to release the lock.

Reduce the locking section to only cover the callback invocation and
document the functions that take the AioContext lock, to avoid taking
it twice.

Also remove real_job_{lock/unlock}, as they are replaced by the public
functions.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20220926093214.506243-19-eesposit@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
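To illustrate the pattern that the test hunks below apply, here is a
minimal before/after sketch. It uses only the QEMU-internal helpers
that appear in the diff itself (WITH_JOB_LOCK_GUARD, 
job_complete_sync_locked, aio_context_acquire/release,
blk_set_aio_context); ctx, job and blk stand for a test's AioContext,
job and BlockBackend. It is an illustration of the locking change,
not a standalone compilable example.

    /* Before: the AioContext lock wrapped the whole job interaction. */
    aio_context_acquire(ctx);
    WITH_JOB_LOCK_GUARD() {
        ret = job_complete_sync_locked(&job->job, &error_abort);
    }
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);

    /*
     * After: the job mutex (taken via WITH_JOB_LOCK_GUARD) protects the
     * job API on its own, and the AioContext lock is narrowed to the
     * block-layer calls that still require it.
     */
    WITH_JOB_LOCK_GUARD() {
        ret = job_complete_sync_locked(&job->job, &error_abort);
    }
    aio_context_acquire(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
    aio_context_release(ctx);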
Diffstat (limited to 'tests')
-rw-r--r--   tests/unit/test-bdrv-drain.c        4
-rw-r--r--   tests/unit/test-block-iothread.c    2
-rw-r--r--   tests/unit/test-blockjob.c         19
3 files changed, 11 insertions, 14 deletions
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 0db056e..4924ceb 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -930,9 +930,9 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         tjob->prepare_ret = -EIO;
         break;
     }
+    aio_context_release(ctx);
 
     job_start(&job->job);
-    aio_context_release(ctx);
 
     if (use_iothread) {
         /* job_co_entry() is run in the I/O thread, wait for the actual job
@@ -1016,12 +1016,12 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
     }
 
-    aio_context_acquire(ctx);
     WITH_JOB_LOCK_GUARD() {
         ret = job_complete_sync_locked(&job->job, &error_abort);
     }
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
+    aio_context_acquire(ctx);
 
     if (use_iothread) {
         blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
         assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index 96fd21c..def0709 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -582,10 +582,10 @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    aio_context_acquire(ctx);
     WITH_JOB_LOCK_GUARD() {
         job_complete_sync_locked(&tjob->common.job, &error_abort);
     }
+    aio_context_acquire(ctx);
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index e4f126b..f88e10e 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -228,10 +228,7 @@ static void cancel_common(CancelJob *s)
     BlockJob *job = &s->common;
     BlockBackend *blk = s->blk;
     JobStatus sts = job->job.status;
-    AioContext *ctx;
-
-    ctx = job->job.aio_context;
-    aio_context_acquire(ctx);
+    AioContext *ctx = job->job.aio_context;
 
     job_cancel_sync(&job->job, true);
     WITH_JOB_LOCK_GUARD() {
@@ -242,9 +239,11 @@ static void cancel_common(CancelJob *s)
         assert(job->job.status == JOB_STATUS_NULL);
         job_unref_locked(&job->job);
     }
-    destroy_blk(blk);
 
+    aio_context_acquire(ctx);
+    destroy_blk(blk);
     aio_context_release(ctx);
+
 }
 
 static void test_cancel_created(void)
@@ -384,12 +383,10 @@ static void test_cancel_concluded(void)
     aio_poll(qemu_get_aio_context(), true);
     assert_job_status_is(job, JOB_STATUS_PENDING);
 
-    aio_context_acquire(job->aio_context);
     WITH_JOB_LOCK_GUARD() {
         job_finalize_locked(job, &error_abort);
+        assert(job->status == JOB_STATUS_CONCLUDED);
     }
-    aio_context_release(job->aio_context);
-    assert_job_status_is(job, JOB_STATUS_CONCLUDED);
 
     cancel_common(s);
 }
@@ -481,13 +478,11 @@ static void test_complete_in_standby(void)
 
     /* Wait for the job to become READY */
     job_start(job);
-    aio_context_acquire(ctx);
     /*
      * Here we are waiting for the status to change, so don't bother
      * protecting the read every time.
      */
-    AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
-    aio_context_release(ctx);
+    AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);
 
     /* Begin the drained section, pausing the job */
     bdrv_drain_all_begin();
@@ -497,6 +492,7 @@ static void test_complete_in_standby(void)
     aio_context_acquire(ctx);
     /* This will schedule the job to resume it */
     bdrv_drain_all_end();
+    aio_context_release(ctx);
 
     WITH_JOB_LOCK_GUARD() {
         /* But the job cannot run, so it will remain on standby */
@@ -515,6 +511,7 @@ static void test_complete_in_standby(void)
         job_dismiss_locked(&job, &error_abort);
     }
 
+    aio_context_acquire(ctx);
     destroy_blk(blk);
     aio_context_release(ctx);
     iothread_join(iothread);