Diffstat (limited to 'tests/unit')
-rw-r--r--  tests/unit/test-bdrv-drain.c      |  80
-rw-r--r--  tests/unit/test-block-iothread.c  |   8
-rw-r--r--  tests/unit/test-blockjob-txn.c    |  24
-rw-r--r--  tests/unit/test-blockjob.c        | 136
-rw-r--r--  tests/unit/test-coroutine.c       |   2
5 files changed, 152 insertions(+), 98 deletions(-)
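
The diffs below all follow one pattern from QEMU's job-locking rework: call sites stop relying on aio_context_acquire()/release() to protect job state and instead take the global job mutex, either through WITH_JOB_LOCK_GUARD() or by calling the _locked variants of the job API. As a rough sketch of the scoped-guard idiom (not QEMU's actual macro, which also unlocks on early exits via cleanup attributes), a guard can be built from a one-iteration for loop:

    #include <pthread.h>
    #include <assert.h>

    /* Hypothetical stand-in for QEMU's global job_mutex. */
    static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;

    /*
     * One-iteration for loop: the init clause locks, the body runs once,
     * the increment clause unlocks.  Unlike QEMU's real macro, this toy
     * version does not unlock if the body breaks or returns early.
     */
    #define WITH_JOB_LOCK_GUARD() \
        for (int guard_ = (pthread_mutex_lock(&job_mutex), 0); !guard_; \
             guard_ = (pthread_mutex_unlock(&job_mutex), 1))

    int main(void)
    {
        int pause_count = 0;

        WITH_JOB_LOCK_GUARD() {
            /* All reads and writes of shared job state go here. */
            assert(pause_count == 0);
        }
        return 0;
    }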
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 36be84a..4924ceb 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -930,9 +930,9 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
tjob->prepare_ret = -EIO;
break;
}
+ aio_context_release(ctx);
job_start(&job->job);
- aio_context_release(ctx);
if (use_iothread) {
/* job_co_entry() is run in the I/O thread, wait for the actual job
@@ -943,63 +943,85 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
}
}
- g_assert_cmpint(job->job.pause_count, ==, 0);
- g_assert_false(job->job.paused);
- g_assert_true(tjob->running);
- g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+ WITH_JOB_LOCK_GUARD() {
+ g_assert_cmpint(job->job.pause_count, ==, 0);
+ g_assert_false(job->job.paused);
+ g_assert_true(tjob->running);
+ g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+ }
do_drain_begin_unlocked(drain_type, drain_bs);
- if (drain_type == BDRV_DRAIN_ALL) {
- /* bdrv_drain_all() drains both src and target */
- g_assert_cmpint(job->job.pause_count, ==, 2);
- } else {
- g_assert_cmpint(job->job.pause_count, ==, 1);
+ WITH_JOB_LOCK_GUARD() {
+ if (drain_type == BDRV_DRAIN_ALL) {
+ /* bdrv_drain_all() drains both src and target */
+ g_assert_cmpint(job->job.pause_count, ==, 2);
+ } else {
+ g_assert_cmpint(job->job.pause_count, ==, 1);
+ }
+ g_assert_true(job->job.paused);
+ g_assert_false(job->job.busy); /* The job is paused */
}
- g_assert_true(job->job.paused);
- g_assert_false(job->job.busy); /* The job is paused */
do_drain_end_unlocked(drain_type, drain_bs);
if (use_iothread) {
- /* paused is reset in the I/O thread, wait for it */
+ /*
+ * Here we are waiting for the paused status to change,
+ * so don't bother protecting the read every time.
+ *
+ * paused is reset in the I/O thread, wait for it
+ */
while (job->job.paused) {
aio_poll(qemu_get_aio_context(), false);
}
}
- g_assert_cmpint(job->job.pause_count, ==, 0);
- g_assert_false(job->job.paused);
- g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+ WITH_JOB_LOCK_GUARD() {
+ g_assert_cmpint(job->job.pause_count, ==, 0);
+ g_assert_false(job->job.paused);
+ g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+ }
do_drain_begin_unlocked(drain_type, target);
- if (drain_type == BDRV_DRAIN_ALL) {
- /* bdrv_drain_all() drains both src and target */
- g_assert_cmpint(job->job.pause_count, ==, 2);
- } else {
- g_assert_cmpint(job->job.pause_count, ==, 1);
+ WITH_JOB_LOCK_GUARD() {
+ if (drain_type == BDRV_DRAIN_ALL) {
+ /* bdrv_drain_all() drains both src and target */
+ g_assert_cmpint(job->job.pause_count, ==, 2);
+ } else {
+ g_assert_cmpint(job->job.pause_count, ==, 1);
+ }
+ g_assert_true(job->job.paused);
+ g_assert_false(job->job.busy); /* The job is paused */
}
- g_assert_true(job->job.paused);
- g_assert_false(job->job.busy); /* The job is paused */
do_drain_end_unlocked(drain_type, target);
if (use_iothread) {
- /* paused is reset in the I/O thread, wait for it */
+ /*
+ * Here we are waiting for the paused status to change,
+ * so don't bother protecting the read every time.
+ *
+ * paused is reset in the I/O thread, wait for it
+ */
while (job->job.paused) {
aio_poll(qemu_get_aio_context(), false);
}
}
- g_assert_cmpint(job->job.pause_count, ==, 0);
- g_assert_false(job->job.paused);
- g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+ WITH_JOB_LOCK_GUARD() {
+ g_assert_cmpint(job->job.pause_count, ==, 0);
+ g_assert_false(job->job.paused);
+ g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+ }
- aio_context_acquire(ctx);
- ret = job_complete_sync(&job->job, &error_abort);
+ WITH_JOB_LOCK_GUARD() {
+ ret = job_complete_sync_locked(&job->job, &error_abort);
+ }
g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
+ aio_context_acquire(ctx);
if (use_iothread) {
blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
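
Note how the hunks above move whole groups of assertions inside a single WITH_JOB_LOCK_GUARD() block rather than wrapping each read individually: pause_count, paused and busy form one logical snapshot, updated together under the job mutex. A minimal pthread illustration with hypothetical fields (not QEMU code):

    #include <pthread.h>
    #include <assert.h>
    #include <stdbool.h>

    static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int pause_count;   /* protected by job_mutex */
    static bool paused;       /* protected by job_mutex */

    /* Writer: both fields change within a single critical section. */
    static void *drainer(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&job_mutex);
        pause_count++;
        paused = true;
        pthread_mutex_unlock(&job_mutex);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, drainer, NULL);

        /*
         * Reader: one lock around the whole group guarantees a consistent
         * snapshot; locking around each assert separately would let the
         * writer slip in between the checks.
         */
        pthread_mutex_lock(&job_mutex);
        assert(pause_count == 0 ? !paused : paused);
        pthread_mutex_unlock(&job_mutex);

        pthread_join(t, NULL);
        return 0;
    }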
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index 8b55ecc..def0709 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -582,8 +582,10 @@ static void test_attach_blockjob(void)
aio_poll(qemu_get_aio_context(), false);
}
+ WITH_JOB_LOCK_GUARD() {
+ job_complete_sync_locked(&tjob->common.job, &error_abort);
+ }
aio_context_acquire(ctx);
- job_complete_sync(&tjob->common.job, &error_abort);
blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
aio_context_release(ctx);
@@ -757,7 +759,9 @@ static void test_propagate_mirror(void)
BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
&error_abort);
- job = job_get("job0");
+ WITH_JOB_LOCK_GUARD() {
+ job = job_get_locked("job0");
+ }
filter = bdrv_find_node("filter_node");
/* Change the AioContext of src */
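
job_get() becoming job_get_locked() inside a guard follows the naming convention used throughout this series: a _locked suffix documents that the caller must already hold the job mutex. A toy version of the convention (illustrative types and registry, not QEMU's Job):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative job registry; names only echo QEMU's, not its types. */
    typedef struct ToyJob {
        const char *id;
        struct ToyJob *next;
    } ToyJob;

    static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;
    static ToyJob *jobs;   /* global list, protected by job_mutex */

    /* _locked: caller must already hold job_mutex. */
    static ToyJob *job_get_locked(const char *id)
    {
        for (ToyJob *j = jobs; j; j = j->next) {
            if (strcmp(j->id, id) == 0) {
                return j;
            }
        }
        return NULL;
    }

    int main(void)
    {
        ToyJob job0 = { .id = "job0", .next = NULL };

        pthread_mutex_lock(&job_mutex);
        jobs = &job0;
        printf("found: %s\n", job_get_locked("job0") ? "yes" : "no");
        pthread_mutex_unlock(&job_mutex);
        return 0;
    }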
diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c
index c69028b..d3b0bb2 100644
--- a/tests/unit/test-blockjob-txn.c
+++ b/tests/unit/test-blockjob-txn.c
@@ -116,8 +116,10 @@ static void test_single_job(int expected)
job = test_block_job_start(1, true, expected, &result, txn);
job_start(&job->job);
- if (expected == -ECANCELED) {
- job_cancel(&job->job, false);
+ WITH_JOB_LOCK_GUARD() {
+ if (expected == -ECANCELED) {
+ job_cancel_locked(&job->job, false);
+ }
}
while (result == -EINPROGRESS) {
@@ -160,13 +162,15 @@ static void test_pair_jobs(int expected1, int expected2)
/* Release our reference now to trigger as many nice
* use-after-free bugs as possible.
*/
- job_txn_unref(txn);
+ WITH_JOB_LOCK_GUARD() {
+ job_txn_unref_locked(txn);
- if (expected1 == -ECANCELED) {
- job_cancel(&job1->job, false);
- }
- if (expected2 == -ECANCELED) {
- job_cancel(&job2->job, false);
+ if (expected1 == -ECANCELED) {
+ job_cancel_locked(&job1->job, false);
+ }
+ if (expected2 == -ECANCELED) {
+ job_cancel_locked(&job2->job, false);
+ }
}
while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
@@ -219,7 +223,9 @@ static void test_pair_jobs_fail_cancel_race(void)
job_start(&job1->job);
job_start(&job2->job);
- job_cancel(&job1->job, false);
+ WITH_JOB_LOCK_GUARD() {
+ job_cancel_locked(&job1->job, false);
+ }
/* Now make job2 finish before the main loop kicks jobs. This simulates
* the race between a pending kick and another job completing.
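
In test_pair_jobs above, the unref and both cancels now share a single guard, so the status check inside job_cancel_locked() and the resulting transition stay atomic with respect to a job finishing on another thread. A sketch of such a check-and-transition that must not be torn apart (hypothetical ToyJob, not QEMU's API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { JOB_RUNNING, JOB_CANCELLED, JOB_DONE } ToyState;

    typedef struct {
        ToyState state;   /* protected by job_mutex */
    } ToyJob;

    static pthread_mutex_t job_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Caller holds job_mutex: the test and the transition are one step. */
    static bool job_cancel_locked(ToyJob *job)
    {
        if (job->state != JOB_RUNNING) {
            return false;    /* lost the race: job already finished */
        }
        job->state = JOB_CANCELLED;
        return true;
    }

    int main(void)
    {
        ToyJob job = { .state = JOB_RUNNING };

        pthread_mutex_lock(&job_mutex);
        /* Several _locked calls can share the same acquisition. */
        bool cancelled = job_cancel_locked(&job);
        pthread_mutex_unlock(&job_mutex);

        printf("cancelled: %d\n", cancelled);
        return 0;
    }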
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index 4c9e1bf..c0426bd 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -211,8 +211,11 @@ static CancelJob *create_common(Job **pjob)
bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
job = &bjob->job;
- job_ref(job);
- assert(job->status == JOB_STATUS_CREATED);
+ WITH_JOB_LOCK_GUARD() {
+ job_ref_locked(job);
+ assert(job->status == JOB_STATUS_CREATED);
+ }
+
s = container_of(bjob, CancelJob, common);
s->blk = blk;
@@ -225,21 +228,22 @@ static void cancel_common(CancelJob *s)
BlockJob *job = &s->common;
BlockBackend *blk = s->blk;
JobStatus sts = job->job.status;
- AioContext *ctx;
-
- ctx = job->job.aio_context;
- aio_context_acquire(ctx);
+ AioContext *ctx = job->job.aio_context;
job_cancel_sync(&job->job, true);
- if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
- Job *dummy = &job->job;
- job_dismiss(&dummy, &error_abort);
+ WITH_JOB_LOCK_GUARD() {
+ if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
+ Job *dummy = &job->job;
+ job_dismiss_locked(&dummy, &error_abort);
+ }
+ assert(job->job.status == JOB_STATUS_NULL);
+ job_unref_locked(&job->job);
}
- assert(job->job.status == JOB_STATUS_NULL);
- job_unref(&job->job);
- destroy_blk(blk);
+ aio_context_acquire(ctx);
+ destroy_blk(blk);
aio_context_release(ctx);
+
}
static void test_cancel_created(void)
@@ -251,6 +255,13 @@ static void test_cancel_created(void)
cancel_common(s);
}
+static void assert_job_status_is(Job *job, int status)
+{
+ WITH_JOB_LOCK_GUARD() {
+ assert(job->status == status);
+ }
+}
+
static void test_cancel_running(void)
{
Job *job;
@@ -259,7 +270,7 @@ static void test_cancel_running(void)
s = create_common(&job);
job_start(job);
- assert(job->status == JOB_STATUS_RUNNING);
+ assert_job_status_is(job, JOB_STATUS_RUNNING);
cancel_common(s);
}
@@ -272,11 +283,12 @@ static void test_cancel_paused(void)
s = create_common(&job);
job_start(job);
- assert(job->status == JOB_STATUS_RUNNING);
-
- job_user_pause(job, &error_abort);
+ WITH_JOB_LOCK_GUARD() {
+ assert(job->status == JOB_STATUS_RUNNING);
+ job_user_pause_locked(job, &error_abort);
+ }
job_enter(job);
- assert(job->status == JOB_STATUS_PAUSED);
+ assert_job_status_is(job, JOB_STATUS_PAUSED);
cancel_common(s);
}
@@ -289,11 +301,11 @@ static void test_cancel_ready(void)
s = create_common(&job);
job_start(job);
- assert(job->status == JOB_STATUS_RUNNING);
+ assert_job_status_is(job, JOB_STATUS_RUNNING);
s->should_converge = true;
job_enter(job);
- assert(job->status == JOB_STATUS_READY);
+ assert_job_status_is(job, JOB_STATUS_READY);
cancel_common(s);
}
@@ -306,15 +318,16 @@ static void test_cancel_standby(void)
s = create_common(&job);
job_start(job);
- assert(job->status == JOB_STATUS_RUNNING);
+ assert_job_status_is(job, JOB_STATUS_RUNNING);
s->should_converge = true;
job_enter(job);
- assert(job->status == JOB_STATUS_READY);
-
- job_user_pause(job, &error_abort);
+ WITH_JOB_LOCK_GUARD() {
+ assert(job->status == JOB_STATUS_READY);
+ job_user_pause_locked(job, &error_abort);
+ }
job_enter(job);
- assert(job->status == JOB_STATUS_STANDBY);
+ assert_job_status_is(job, JOB_STATUS_STANDBY);
cancel_common(s);
}
@@ -327,20 +340,21 @@ static void test_cancel_pending(void)
s = create_common(&job);
job_start(job);
- assert(job->status == JOB_STATUS_RUNNING);
+ assert_job_status_is(job, JOB_STATUS_RUNNING);
s->should_converge = true;
job_enter(job);
- assert(job->status == JOB_STATUS_READY);
-
- job_complete(job, &error_abort);
+ WITH_JOB_LOCK_GUARD() {
+ assert(job->status == JOB_STATUS_READY);
+ job_complete_locked(job, &error_abort);
+ }
job_enter(job);
while (!job->deferred_to_main_loop) {
aio_poll(qemu_get_aio_context(), true);
}
- assert(job->status == JOB_STATUS_READY);
+ assert_job_status_is(job, JOB_STATUS_READY);
aio_poll(qemu_get_aio_context(), true);
- assert(job->status == JOB_STATUS_PENDING);
+ assert_job_status_is(job, JOB_STATUS_PENDING);
cancel_common(s);
}
@@ -353,25 +367,26 @@ static void test_cancel_concluded(void)
s = create_common(&job);
job_start(job);
- assert(job->status == JOB_STATUS_RUNNING);
+ assert_job_status_is(job, JOB_STATUS_RUNNING);
s->should_converge = true;
job_enter(job);
- assert(job->status == JOB_STATUS_READY);
-
- job_complete(job, &error_abort);
+ WITH_JOB_LOCK_GUARD() {
+ assert(job->status == JOB_STATUS_READY);
+ job_complete_locked(job, &error_abort);
+ }
job_enter(job);
while (!job->deferred_to_main_loop) {
aio_poll(qemu_get_aio_context(), true);
}
- assert(job->status == JOB_STATUS_READY);
+ assert_job_status_is(job, JOB_STATUS_READY);
aio_poll(qemu_get_aio_context(), true);
- assert(job->status == JOB_STATUS_PENDING);
+ assert_job_status_is(job, JOB_STATUS_PENDING);
- aio_context_acquire(job->aio_context);
- job_finalize(job, &error_abort);
- aio_context_release(job->aio_context);
- assert(job->status == JOB_STATUS_CONCLUDED);
+ WITH_JOB_LOCK_GUARD() {
+ job_finalize_locked(job, &error_abort);
+ assert(job->status == JOB_STATUS_CONCLUDED);
+ }
cancel_common(s);
}
@@ -417,7 +432,7 @@ static const BlockJobDriver test_yielding_driver = {
};
/*
- * Test that job_complete() works even on jobs that are in a paused
+ * Test that job_complete_locked() works even on jobs that are in a paused
* state (i.e., STANDBY).
*
* To do this, run YieldingJob in an IO thread, get it into the READY
@@ -425,7 +440,7 @@ static const BlockJobDriver test_yielding_driver = {
* acquire the context so the job will not be entered and will thus
* remain on STANDBY.
*
- * job_complete() should still work without error.
+ * job_complete_locked() should still work without error.
*
* Note that on the QMP interface, it is impossible to lock an IO
* thread before a drained section ends. In practice, the
@@ -459,37 +474,44 @@ static void test_complete_in_standby(void)
bjob = mk_job(blk, "job", &test_yielding_driver, true,
JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
job = &bjob->job;
- assert(job->status == JOB_STATUS_CREATED);
+ assert_job_status_is(job, JOB_STATUS_CREATED);
/* Wait for the job to become READY */
job_start(job);
- aio_context_acquire(ctx);
- AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
- aio_context_release(ctx);
+ /*
+ * Here we are waiting for the status to change, so don't bother
+ * protecting the read every time.
+ */
+ AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);
/* Begin the drained section, pausing the job */
bdrv_drain_all_begin();
- assert(job->status == JOB_STATUS_STANDBY);
+ assert_job_status_is(job, JOB_STATUS_STANDBY);
+
/* Lock the IO thread to prevent the job from being run */
aio_context_acquire(ctx);
/* This will schedule the job to resume it */
bdrv_drain_all_end();
+ aio_context_release(ctx);
- /* But the job cannot run, so it will remain on standby */
- assert(job->status == JOB_STATUS_STANDBY);
+ WITH_JOB_LOCK_GUARD() {
+ /* But the job cannot run, so it will remain on standby */
+ assert(job->status == JOB_STATUS_STANDBY);
- /* Even though the job is on standby, this should work */
- job_complete(job, &error_abort);
+ /* Even though the job is on standby, this should work */
+ job_complete_locked(job, &error_abort);
- /* The test is done now, clean up. */
- job_finish_sync(job, NULL, &error_abort);
- assert(job->status == JOB_STATUS_PENDING);
+ /* The test is done now, clean up. */
+ job_finish_sync_locked(job, NULL, &error_abort);
+ assert(job->status == JOB_STATUS_PENDING);
- job_finalize(job, &error_abort);
- assert(job->status == JOB_STATUS_CONCLUDED);
+ job_finalize_locked(job, &error_abort);
+ assert(job->status == JOB_STATUS_CONCLUDED);
- job_dismiss(&job, &error_abort);
+ job_dismiss_locked(&job, &error_abort);
+ }
+ aio_context_acquire(ctx);
destroy_blk(blk);
aio_context_release(ctx);
iothread_join(iothread);
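
Two changes in this file deliberately poll job->status without taking the lock on each read: the AIO_WAIT_WHILE_UNLOCKED() loop and the comment about not protecting the read. The justification is that the loop only waits for the value to change, so a stale read costs one extra iteration, never a wrong decision. In standalone C the field would still need to be atomic for the unlocked read to be well defined, so a faithful sketch looks like this (hypothetical status values):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    enum { ST_CREATED, ST_RUNNING, ST_READY };   /* illustrative states */

    static _Atomic int status = ST_CREATED;

    static void *worker(void *arg)
    {
        (void)arg;
        atomic_store(&status, ST_RUNNING);
        usleep(1000);
        atomic_store(&status, ST_READY);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);

        /*
         * Unlocked wait loop: correctness only needs the loop to exit
         * once the state settles, so no lock is taken per read.  QEMU's
         * AIO_WAIT_WHILE_UNLOCKED() additionally services the event
         * loop in its body; here the loop just spins.
         */
        while (atomic_load(&status) != ST_READY) {
            /* aio_poll() would run here in the real test */
        }

        pthread_join(t, NULL);
        printf("job reached READY\n");
        return 0;
    }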
diff --git a/tests/unit/test-coroutine.c b/tests/unit/test-coroutine.c
index aa77a3b..e16b80c 100644
--- a/tests/unit/test-coroutine.c
+++ b/tests/unit/test-coroutine.c
@@ -610,7 +610,7 @@ static void perf_baseline(void)
g_test_message("Function call %u iterations: %f s", maxcycles, duration);
}
-static __attribute__((noinline)) void perf_cost_func(void *opaque)
+static __attribute__((noinline)) void coroutine_fn perf_cost_func(void *opaque)
{
qemu_coroutine_yield();
}
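
The test-coroutine.c hunk only adds the coroutine_fn marker to perf_cost_func(), which yields and therefore must run in coroutine context. The marker has no runtime effect; it exists so a static checker can flag coroutine-only functions called from the wrong context. A minimal sketch of the annotation idiom (QEMU's actual definition differs in detail):

    #include <stdio.h>

    /* No-op for GCC; clang can attach an annotation checkers can read. */
    #ifdef __clang__
    #define coroutine_fn __attribute__((annotate("coroutine_fn")))
    #else
    #define coroutine_fn
    #endif

    /* Callable only from coroutine context, by convention of the marker. */
    static void coroutine_fn toy_co_fn(void *opaque)
    {
        (void)opaque;
        /* a real coroutine function would qemu_coroutine_yield() here */
    }

    int main(void)
    {
        /* Calling it directly here is what a checker would flag. */
        toy_co_fn(NULL);
        printf("coroutine_fn is purely a compile-time marker\n");
        return 0;
    }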