From f7bbb1564d389e180c2dee7fbbad78b381e4d4ac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?=
Date: Thu, 22 Sep 2022 10:49:24 +0200
Subject: test-coroutine: add missing coroutine_fn annotations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Callers of coroutine_fn must be coroutine_fn themselves, or the call
must be within "if (qemu_in_coroutine())". Apply coroutine_fn to
functions where this holds.

Signed-off-by: Marc-André Lureau
Reviewed-by: Alberto Faria
Signed-off-by: Paolo Bonzini
Message-Id: <20220922084924.201610-27-pbonzini@redhat.com>
Reviewed-by: Kevin Wolf
Signed-off-by: Kevin Wolf
---
 tests/unit/test-coroutine.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tests')

diff --git a/tests/unit/test-coroutine.c b/tests/unit/test-coroutine.c
index aa77a3b..e16b80c 100644
--- a/tests/unit/test-coroutine.c
+++ b/tests/unit/test-coroutine.c
@@ -610,7 +610,7 @@ static void perf_baseline(void)
     g_test_message("Function call %u iterations: %f s", maxcycles, duration);
 }
 
-static __attribute__((noinline)) void perf_cost_func(void *opaque)
+static __attribute__((noinline)) void coroutine_fn perf_cost_func(void *opaque)
 {
     qemu_coroutine_yield();
 }
--
cgit v1.1

From 191e7af3949b51a0a675a3e13539c17140dc2fba Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito
Date: Mon, 26 Sep 2022 05:32:02 -0400
Subject: jobs: use job locks also in the unit tests

Add missing job synchronization in the unit tests, with explicit
locks.

We deliberately use _locked functions wrapped by a guard instead of
the normal calls, because the normal calls will be removed in the
future: their only remaining usage is in the tests.

In other words, if a function like job_pause() is (or will be) used
only in tests, and the alternative is simply to write

WITH_JOB_LOCK_GUARD() {
    job_pause_locked();
}

then it is not worth keeping job_pause(); just use the guard.

Note: at this stage, job_{lock/unlock} and the job lock guard macros
are *nop*.
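For example (a sketch of the conversion pattern applied throughout
this patch; the exact call varies per test), a plain call such as

    job_user_pause(job, &error_abort);

becomes the explicitly locked form

    WITH_JOB_LOCK_GUARD() {
        job_user_pause_locked(job, &error_abort);
    }

and reads of job fields such as job->status or job->pause_count move
under the same guard.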
Signed-off-by: Emanuele Giuseppe Esposito
Reviewed-by: Vladimir Sementsov-Ogievskiy
Reviewed-by: Kevin Wolf
Message-Id: <20220926093214.506243-10-eesposit@redhat.com>
Signed-off-by: Kevin Wolf
---
 tests/unit/test-bdrv-drain.c     |  76 +++++++++++++++++---------
 tests/unit/test-block-iothread.c |   8 ++-
 tests/unit/test-blockjob-txn.c   |  24 +++++---
 tests/unit/test-blockjob.c       | 115 ++++++++++++++++++++++++---------------
 4 files changed, 140 insertions(+), 83 deletions(-)

(limited to 'tests')

diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 36be84a..0db056e 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -943,61 +943,83 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         }
     }
 
-    g_assert_cmpint(job->job.pause_count, ==, 0);
-    g_assert_false(job->job.paused);
-    g_assert_true(tjob->running);
-    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    WITH_JOB_LOCK_GUARD() {
+        g_assert_cmpint(job->job.pause_count, ==, 0);
+        g_assert_false(job->job.paused);
+        g_assert_true(tjob->running);
+        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    }
 
     do_drain_begin_unlocked(drain_type, drain_bs);
 
-    if (drain_type == BDRV_DRAIN_ALL) {
-        /* bdrv_drain_all() drains both src and target */
-        g_assert_cmpint(job->job.pause_count, ==, 2);
-    } else {
-        g_assert_cmpint(job->job.pause_count, ==, 1);
+    WITH_JOB_LOCK_GUARD() {
+        if (drain_type == BDRV_DRAIN_ALL) {
+            /* bdrv_drain_all() drains both src and target */
+            g_assert_cmpint(job->job.pause_count, ==, 2);
+        } else {
+            g_assert_cmpint(job->job.pause_count, ==, 1);
+        }
+        g_assert_true(job->job.paused);
+        g_assert_false(job->job.busy); /* The job is paused */
     }
-    g_assert_true(job->job.paused);
-    g_assert_false(job->job.busy); /* The job is paused */
 
     do_drain_end_unlocked(drain_type, drain_bs);
 
     if (use_iothread) {
-        /* paused is reset in the I/O thread, wait for it */
+        /*
+         * Here we are waiting for the paused status to change,
+         * so don't bother protecting the read every time.
+         *
+         * paused is reset in the I/O thread, wait for it
+         */
         while (job->job.paused) {
            aio_poll(qemu_get_aio_context(), false);
        }
     }
 
-    g_assert_cmpint(job->job.pause_count, ==, 0);
-    g_assert_false(job->job.paused);
-    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    WITH_JOB_LOCK_GUARD() {
+        g_assert_cmpint(job->job.pause_count, ==, 0);
+        g_assert_false(job->job.paused);
+        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    }
 
     do_drain_begin_unlocked(drain_type, target);
 
-    if (drain_type == BDRV_DRAIN_ALL) {
-        /* bdrv_drain_all() drains both src and target */
-        g_assert_cmpint(job->job.pause_count, ==, 2);
-    } else {
-        g_assert_cmpint(job->job.pause_count, ==, 1);
+    WITH_JOB_LOCK_GUARD() {
+        if (drain_type == BDRV_DRAIN_ALL) {
+            /* bdrv_drain_all() drains both src and target */
+            g_assert_cmpint(job->job.pause_count, ==, 2);
+        } else {
+            g_assert_cmpint(job->job.pause_count, ==, 1);
+        }
+        g_assert_true(job->job.paused);
+        g_assert_false(job->job.busy); /* The job is paused */
     }
-    g_assert_true(job->job.paused);
-    g_assert_false(job->job.busy); /* The job is paused */
 
     do_drain_end_unlocked(drain_type, target);
 
     if (use_iothread) {
-        /* paused is reset in the I/O thread, wait for it */
+        /*
+         * Here we are waiting for the paused status to change,
+         * so don't bother protecting the read every time.
+         *
+         * paused is reset in the I/O thread, wait for it
+         */
         while (job->job.paused) {
             aio_poll(qemu_get_aio_context(), false);
         }
     }
 
-    g_assert_cmpint(job->job.pause_count, ==, 0);
-    g_assert_false(job->job.paused);
-    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    WITH_JOB_LOCK_GUARD() {
+        g_assert_cmpint(job->job.pause_count, ==, 0);
+        g_assert_false(job->job.paused);
+        g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
+    }
 
     aio_context_acquire(ctx);
-    ret = job_complete_sync(&job->job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        ret = job_complete_sync_locked(&job->job, &error_abort);
+    }
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
 
     if (use_iothread) {
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index 8b55ecc..96fd21c 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -583,7 +583,9 @@ static void test_attach_blockjob(void)
     }
 
     aio_context_acquire(ctx);
-    job_complete_sync(&tjob->common.job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        job_complete_sync_locked(&tjob->common.job, &error_abort);
+    }
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
 
@@ -757,7 +759,9 @@ static void test_propagate_mirror(void)
                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                  false, "filter_node", MIRROR_COPY_MODE_BACKGROUND,
                  &error_abort);
-    job = job_get("job0");
+    WITH_JOB_LOCK_GUARD() {
+        job = job_get_locked("job0");
+    }
     filter = bdrv_find_node("filter_node");
 
     /* Change the AioContext of src */
diff --git a/tests/unit/test-blockjob-txn.c b/tests/unit/test-blockjob-txn.c
index c69028b..d3b0bb2 100644
--- a/tests/unit/test-blockjob-txn.c
+++ b/tests/unit/test-blockjob-txn.c
@@ -116,8 +116,10 @@ static void test_single_job(int expected)
     job = test_block_job_start(1, true, expected, &result, txn);
     job_start(&job->job);
 
-    if (expected == -ECANCELED) {
-        job_cancel(&job->job, false);
+    WITH_JOB_LOCK_GUARD() {
+        if (expected == -ECANCELED) {
+            job_cancel_locked(&job->job, false);
+        }
     }
 
     while (result == -EINPROGRESS) {
@@ -160,13 +162,15 @@ static void test_pair_jobs(int expected1, int expected2)
     /* Release our reference now to trigger as many nice
      * use-after-free bugs as possible.
      */
-    job_txn_unref(txn);
+    WITH_JOB_LOCK_GUARD() {
+        job_txn_unref_locked(txn);
 
-    if (expected1 == -ECANCELED) {
-        job_cancel(&job1->job, false);
-    }
-    if (expected2 == -ECANCELED) {
-        job_cancel(&job2->job, false);
+        if (expected1 == -ECANCELED) {
+            job_cancel_locked(&job1->job, false);
+        }
+        if (expected2 == -ECANCELED) {
+            job_cancel_locked(&job2->job, false);
+        }
     }
 
     while (result1 == -EINPROGRESS || result2 == -EINPROGRESS) {
@@ -219,7 +223,9 @@ static void test_pair_jobs_fail_cancel_race(void)
     job_start(&job1->job);
     job_start(&job2->job);
 
-    job_cancel(&job1->job, false);
+    WITH_JOB_LOCK_GUARD() {
+        job_cancel_locked(&job1->job, false);
+    }
 
     /* Now make job2 finish before the main loop kicks jobs.  This simulates
      * the race between a pending kick and another job completing.
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index 4c9e1bf..e4f126b 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -211,8 +211,11 @@ static CancelJob *create_common(Job **pjob)
     bjob = mk_job(blk, "Steve", &test_cancel_driver, true,
                   JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
     job = &bjob->job;
-    job_ref(job);
-    assert(job->status == JOB_STATUS_CREATED);
+    WITH_JOB_LOCK_GUARD() {
+        job_ref_locked(job);
+        assert(job->status == JOB_STATUS_CREATED);
+    }
+
     s = container_of(bjob, CancelJob, common);
     s->blk = blk;
 
@@ -231,12 +234,14 @@ static void cancel_common(CancelJob *s)
     aio_context_acquire(ctx);
 
     job_cancel_sync(&job->job, true);
-    if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
-        Job *dummy = &job->job;
-        job_dismiss(&dummy, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) {
+            Job *dummy = &job->job;
+            job_dismiss_locked(&dummy, &error_abort);
+        }
+        assert(job->job.status == JOB_STATUS_NULL);
+        job_unref_locked(&job->job);
     }
-    assert(job->job.status == JOB_STATUS_NULL);
-    job_unref(&job->job);
     destroy_blk(blk);
 
     aio_context_release(ctx);
@@ -251,6 +256,13 @@ static void test_cancel_created(void)
     cancel_common(s);
 }
 
+static void assert_job_status_is(Job *job, int status)
+{
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == status);
+    }
+}
+
 static void test_cancel_running(void)
 {
     Job *job;
@@ -259,7 +271,7 @@ static void test_cancel_running(void)
     s = create_common(&job);
 
     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);
 
     cancel_common(s);
 }
@@ -272,11 +284,12 @@ static void test_cancel_paused(void)
     s = create_common(&job);
 
     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
-
-    job_user_pause(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == JOB_STATUS_RUNNING);
+        job_user_pause_locked(job, &error_abort);
+    }
     job_enter(job);
-    assert(job->status == JOB_STATUS_PAUSED);
+    assert_job_status_is(job, JOB_STATUS_PAUSED);
 
     cancel_common(s);
 }
@@ -289,11 +302,11 @@ static void test_cancel_ready(void)
     s = create_common(&job);
 
     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);
 
     s->should_converge = true;
     job_enter(job);
-    assert(job->status == JOB_STATUS_READY);
+    assert_job_status_is(job, JOB_STATUS_READY);
 
     cancel_common(s);
 }
@@ -306,15 +319,16 @@ static void test_cancel_standby(void)
     s = create_common(&job);
 
     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);
 
     s->should_converge = true;
     job_enter(job);
-    assert(job->status == JOB_STATUS_READY);
-
-    job_user_pause(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == JOB_STATUS_READY);
+        job_user_pause_locked(job, &error_abort);
+    }
     job_enter(job);
-    assert(job->status == JOB_STATUS_STANDBY);
+    assert_job_status_is(job, JOB_STATUS_STANDBY);
 
     cancel_common(s);
 }
@@ -327,20 +341,21 @@ static void test_cancel_pending(void)
     s = create_common(&job);
 
     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);
 
     s->should_converge = true;
     job_enter(job);
-    assert(job->status == JOB_STATUS_READY);
-
-    job_complete(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == JOB_STATUS_READY);
+        job_complete_locked(job, &error_abort);
+    }
     job_enter(job);
     while (!job->deferred_to_main_loop) {
         aio_poll(qemu_get_aio_context(), true);
     }
-    assert(job->status == JOB_STATUS_READY);
+    assert_job_status_is(job, JOB_STATUS_READY);
 
     aio_poll(qemu_get_aio_context(), true);
-    assert(job->status == JOB_STATUS_PENDING);
+    assert_job_status_is(job, JOB_STATUS_PENDING);
 
     cancel_common(s);
 }
@@ -353,25 +368,28 @@ static void test_cancel_concluded(void)
     s = create_common(&job);
 
     job_start(job);
-    assert(job->status == JOB_STATUS_RUNNING);
+    assert_job_status_is(job, JOB_STATUS_RUNNING);
 
     s->should_converge = true;
     job_enter(job);
-    assert(job->status == JOB_STATUS_READY);
-
-    job_complete(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        assert(job->status == JOB_STATUS_READY);
+        job_complete_locked(job, &error_abort);
+    }
     job_enter(job);
     while (!job->deferred_to_main_loop) {
         aio_poll(qemu_get_aio_context(), true);
     }
-    assert(job->status == JOB_STATUS_READY);
+    assert_job_status_is(job, JOB_STATUS_READY);
 
     aio_poll(qemu_get_aio_context(), true);
-    assert(job->status == JOB_STATUS_PENDING);
+    assert_job_status_is(job, JOB_STATUS_PENDING);
 
     aio_context_acquire(job->aio_context);
-    job_finalize(job, &error_abort);
+    WITH_JOB_LOCK_GUARD() {
+        job_finalize_locked(job, &error_abort);
+    }
     aio_context_release(job->aio_context);
-    assert(job->status == JOB_STATUS_CONCLUDED);
+    assert_job_status_is(job, JOB_STATUS_CONCLUDED);
 
     cancel_common(s);
 }
@@ -459,36 +477,43 @@ static void test_complete_in_standby(void)
     bjob = mk_job(blk, "job", &test_yielding_driver, true,
                   JOB_MANUAL_FINALIZE | JOB_MANUAL_DISMISS);
     job = &bjob->job;
-    assert(job->status == JOB_STATUS_CREATED);
+    assert_job_status_is(job, JOB_STATUS_CREATED);
 
     /* Wait for the job to become READY */
     job_start(job);
     aio_context_acquire(ctx);
+    /*
+     * Here we are waiting for the status to change, so don't bother
+     * protecting the read every time.
+     */
     AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
     aio_context_release(ctx);
 
     /* Begin the drained section, pausing the job */
     bdrv_drain_all_begin();
-    assert(job->status == JOB_STATUS_STANDBY);
+    assert_job_status_is(job, JOB_STATUS_STANDBY);
+
     /* Lock the IO thread to prevent the job from being run */
     aio_context_acquire(ctx);
     /* This will schedule the job to resume it */
     bdrv_drain_all_end();
 
-    /* But the job cannot run, so it will remain on standby */
-    assert(job->status == JOB_STATUS_STANDBY);
+    WITH_JOB_LOCK_GUARD() {
+        /* But the job cannot run, so it will remain on standby */
+        assert(job->status == JOB_STATUS_STANDBY);
 
-    /* Even though the job is on standby, this should work */
-    job_complete(job, &error_abort);
+        /* Even though the job is on standby, this should work */
+        job_complete_locked(job, &error_abort);
 
-    /* The test is done now, clean up. */
-    job_finish_sync(job, NULL, &error_abort);
-    assert(job->status == JOB_STATUS_PENDING);
+        /* The test is done now, clean up. */
+        job_finish_sync_locked(job, NULL, &error_abort);
+        assert(job->status == JOB_STATUS_PENDING);
 
-    job_finalize(job, &error_abort);
-    assert(job->status == JOB_STATUS_CONCLUDED);
+        job_finalize_locked(job, &error_abort);
+        assert(job->status == JOB_STATUS_CONCLUDED);
 
-    job_dismiss(&job, &error_abort);
+        job_dismiss_locked(&job, &error_abort);
+    }
 
     destroy_blk(blk);
     aio_context_release(ctx);
--
cgit v1.1

From 6f592e5aca1a27fe1c1f661cfe68b35b90850acf Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito
Date: Mon, 26 Sep 2022 05:32:11 -0400
Subject: job.c: enable job lock/unlock and remove Aiocontext locks

Change the job_{lock/unlock} functions and macros to use job_mutex.

Now that they are no longer nops, remove the AioContext locking to
avoid deadlocks.
Therefore:
- when possible, remove the AioContext lock/unlock pair completely;
- if the AioContext lock is also used by some other function, reduce
  the locking section as much as possible, leaving the job API
  outside of it;
- change AIO_WAIT_WHILE to AIO_WAIT_WHILE_UNLOCKED, since we are no
  longer using the AioContext lock.

The only functions that still need the AioContext lock are:
- the JobDriver callbacks, already documented in job.h;
- job_cancel_sync() in replication.c, which is called with the
  aio_context_lock taken, but the job now uses
  AIO_WAIT_WHILE_UNLOCKED, so we need to release the lock.

Reduce the locking section to only cover the callback invocation and
document the functions that take the AioContext lock, to avoid taking
it twice.

Also remove real_job_{lock/unlock}, as they are replaced by the
public functions.

Signed-off-by: Emanuele Giuseppe Esposito
Message-Id: <20220926093214.506243-19-eesposit@redhat.com>
Reviewed-by: Kevin Wolf
Reviewed-by: Vladimir Sementsov-Ogievskiy
Signed-off-by: Kevin Wolf
---
 tests/unit/test-bdrv-drain.c     |  4 ++--
 tests/unit/test-block-iothread.c |  2 +-
 tests/unit/test-blockjob.c       | 19 ++++++++-----------
 3 files changed, 11 insertions(+), 14 deletions(-)

(limited to 'tests')

diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 0db056e..4924ceb 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -930,9 +930,9 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         tjob->prepare_ret = -EIO;
         break;
     }
+    aio_context_release(ctx);
 
     job_start(&job->job);
-    aio_context_release(ctx);
 
     if (use_iothread) {
         /* job_co_entry() is run in the I/O thread, wait for the actual job
@@ -1016,12 +1016,12 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
         g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
     }
 
-    aio_context_acquire(ctx);
     WITH_JOB_LOCK_GUARD() {
         ret = job_complete_sync_locked(&job->job, &error_abort);
     }
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
 
+    aio_context_acquire(ctx);
     if (use_iothread) {
         blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
         assert(blk_get_aio_context(blk_target) == qemu_get_aio_context());
 
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index 96fd21c..def0709 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -582,10 +582,10 @@ static void test_attach_blockjob(void)
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    aio_context_acquire(ctx);
     WITH_JOB_LOCK_GUARD() {
         job_complete_sync_locked(&tjob->common.job, &error_abort);
     }
+    aio_context_acquire(ctx);
     blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
 
diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index e4f126b..f88e10e 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -228,10 +228,7 @@ static void cancel_common(CancelJob *s)
     BlockJob *job = &s->common;
     BlockBackend *blk = s->blk;
     JobStatus sts = job->job.status;
-    AioContext *ctx;
-
-    ctx = job->job.aio_context;
-    aio_context_acquire(ctx);
+    AioContext *ctx = job->job.aio_context;
 
     job_cancel_sync(&job->job, true);
     WITH_JOB_LOCK_GUARD() {
@@ -242,9 +239,11 @@ static void cancel_common(CancelJob *s)
         assert(job->job.status == JOB_STATUS_NULL);
         job_unref_locked(&job->job);
     }
-    destroy_blk(blk);
 
+    aio_context_acquire(ctx);
+    destroy_blk(blk);
     aio_context_release(ctx);
+
 }
 
 static void test_cancel_created(void)
@@ -384,12 +383,10 @@ static void test_cancel_concluded(void)
     aio_poll(qemu_get_aio_context(), true);
     assert_job_status_is(job, JOB_STATUS_PENDING);
 
-    aio_context_acquire(job->aio_context);
     WITH_JOB_LOCK_GUARD() {
         job_finalize_locked(job, &error_abort);
+        assert(job->status == JOB_STATUS_CONCLUDED);
     }
-    aio_context_release(job->aio_context);
-    assert_job_status_is(job, JOB_STATUS_CONCLUDED);
 
     cancel_common(s);
 }
@@ -481,13 +478,11 @@ static void test_complete_in_standby(void)
 
     /* Wait for the job to become READY */
     job_start(job);
-    aio_context_acquire(ctx);
     /*
      * Here we are waiting for the status to change, so don't bother
      * protecting the read every time.
      */
-    AIO_WAIT_WHILE(ctx, job->status != JOB_STATUS_READY);
-    aio_context_release(ctx);
+    AIO_WAIT_WHILE_UNLOCKED(ctx, job->status != JOB_STATUS_READY);
 
     /* Begin the drained section, pausing the job */
     bdrv_drain_all_begin();
@@ -497,6 +492,7 @@ static void test_complete_in_standby(void)
     aio_context_acquire(ctx);
     /* This will schedule the job to resume it */
     bdrv_drain_all_end();
+    aio_context_release(ctx);
 
     WITH_JOB_LOCK_GUARD() {
         /* But the job cannot run, so it will remain on standby */
@@ -515,6 +511,7 @@ static void test_complete_in_standby(void)
         job_dismiss_locked(&job, &error_abort);
     }
 
+    aio_context_acquire(ctx);
     destroy_blk(blk);
     aio_context_release(ctx);
     iothread_join(iothread);
--
cgit v1.1

From 9bd4d3c2e3d2e1df979e818ff0a5c05ca455721a Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito
Date: Mon, 26 Sep 2022 05:32:14 -0400
Subject: job: remove unused functions

These public functions are not used anywhere, so they can be dropped.

Also, since this is the final job API, which does not use the
AioContext lock and replaces it with job_lock, adjust all remaining
function documentation to clearly specify whether the job lock is
taken or not.

Also document the locking requirements for a few functions where the
second version is not removed.
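As a sketch of the resulting documentation convention (illustrative
wording and placement only; the exact comments in job.h differ), a
kept _locked function ends up annotated along these lines:

    /*
     * job_complete_locked:
     * Asynchronously complete the specified @job.
     * Called with job lock held.
     */
    void job_complete_locked(Job *job, Error **errp);

so that callers know the job lock must already be held on entry.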
Signed-off-by: Emanuele Giuseppe Esposito
Reviewed-by: Stefan Hajnoczi
Reviewed-by: Kevin Wolf
Message-Id: <20220926093214.506243-22-eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy
Signed-off-by: Kevin Wolf
---
 tests/unit/test-blockjob.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'tests')

diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c
index f88e10e..c0426bd 100644
--- a/tests/unit/test-blockjob.c
+++ b/tests/unit/test-blockjob.c
@@ -432,7 +432,7 @@ static const BlockJobDriver test_yielding_driver = {
 };
 
 /*
- * Test that job_complete() works even on jobs that are in a paused
+ * Test that job_complete_locked() works even on jobs that are in a paused
  * state (i.e., STANDBY).
 *
 * To do this, run YieldingJob in an IO thread, get it into the READY
@@ -440,7 +440,7 @@ static const BlockJobDriver test_yielding_driver = {
 * acquire the context so the job will not be entered and will thus
 * remain on STANDBY.
 *
- * job_complete() should still work without error.
+ * job_complete_locked() should still work without error.
 *
 * Note that on the QMP interface, it is impossible to lock an IO
 * thread before a drained section ends. In practice, the
--
cgit v1.1
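The coroutine_fn rule enforced by the first patch in this series can
be sketched as follows (a minimal illustration using the QEMU
coroutine API; the sketch_* functions are hypothetical):

    /* A coroutine_fn may call another coroutine_fn directly... */
    static void coroutine_fn sketch_co_caller(void *opaque)
    {
        qemu_coroutine_yield();     /* OK: the caller is coroutine_fn */
    }

    /* ...while non-coroutine code must guard the call at run time. */
    static void sketch_mixed_caller(void)
    {
        if (qemu_in_coroutine()) {
            qemu_coroutine_yield(); /* OK: guarded by qemu_in_coroutine() */
        }
    }

Calling a coroutine_fn from plain code without that guard is exactly
what the added annotations make visible.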