diff options
author | Stefan Reiter <s.reiter@proxmox.com> | 2020-04-07 13:56:49 +0200 |
---|---|---|
committer | Kevin Wolf <kwolf@redhat.com> | 2020-04-07 14:34:47 +0200 |
commit | b660a84bbb0eb1a76b505648d31d5e82594fb75e (patch) | |
tree | 50cd750825acaaf28d5adc0e7b736043b682a8a6 | |
parent | 53ef8a92eb04ee19640f5aad3bff36cd4a36c250 (diff) | |
download | qemu-b660a84bbb0eb1a76b505648d31d5e82594fb75e.zip qemu-b660a84bbb0eb1a76b505648d31d5e82594fb75e.tar.gz qemu-b660a84bbb0eb1a76b505648d31d5e82594fb75e.tar.bz2 |
job: take each job's lock individually in job_txn_apply
All callers of job_txn_apply hold a single job's lock, but different
jobs within a transaction can have different contexts, thus we need to
lock each one individually before applying the callback function.
Similar to job_completed_txn_abort this also requires releasing the
caller's context before and reacquiring it after to avoid recursive
locks which might break AIO_WAIT_WHILE in the callback. This is safe, since
existing code would already have to take this into account; otherwise
job_completed_txn_abort would already have been broken.
This also brings to light a different issue: When a callback function in
job_txn_apply moves its job to a different AIO context, callers will
try to release the wrong lock (now that we re-acquire the lock
correctly, previously it would just continue with the old lock, leaving
the job unlocked for the rest of the return path). Fix this by not caching
the job's context.
This is only necessary for qmp_block_job_finalize, qmp_job_finalize and
job_exit, since everyone else calls through job_exit.
One test needed adapting, since it calls job_finalize directly, so it
manually needs to acquire the correct context.
Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Message-Id: <20200407115651.69472-2-s.reiter@proxmox.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
-rw-r--r-- | blockdev.c | 9 | ||||
-rw-r--r-- | job-qmp.c | 9 | ||||
-rw-r--r-- | job.c | 50 | ||||
-rw-r--r-- | tests/test-blockjob.c | 2 |
4 files changed, 60 insertions, 10 deletions
@@ -3612,7 +3612,16 @@ void qmp_block_job_finalize(const char *id, Error **errp) } trace_qmp_block_job_finalize(job); + job_ref(&job->job); job_finalize(&job->job, errp); + + /* + * Job's context might have changed via job_finalize (and job_txn_apply + * automatically acquires the new one), so make sure we release the correct + * one. + */ + aio_context = blk_get_aio_context(job->blk); + job_unref(&job->job); aio_context_release(aio_context); } @@ -114,7 +114,16 @@ void qmp_job_finalize(const char *id, Error **errp) } trace_qmp_job_finalize(job); + job_ref(job); job_finalize(job, errp); + + /* + * Job's context might have changed via job_finalize (and job_txn_apply + * automatically acquires the new one), so make sure we release the correct + * one. + */ + aio_context = job->aio_context; + job_unref(job); aio_context_release(aio_context); } @@ -136,17 +136,38 @@ static void job_txn_del_job(Job *job) } } -static int job_txn_apply(JobTxn *txn, int fn(Job *)) +static int job_txn_apply(Job *job, int fn(Job *)) { - Job *job, *next; + AioContext *inner_ctx; + Job *other_job, *next; + JobTxn *txn = job->txn; int rc = 0; - QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) { - rc = fn(job); + /* + * Similar to job_completed_txn_abort, we take each job's lock before + * applying fn, but since we assume that outer_ctx is held by the caller, + * we need to release it here to avoid holding the lock twice - which would + * break AIO_WAIT_WHILE from within fn. + */ + job_ref(job); + aio_context_release(job->aio_context); + + QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) { + inner_ctx = other_job->aio_context; + aio_context_acquire(inner_ctx); + rc = fn(other_job); + aio_context_release(inner_ctx); if (rc) { break; } } + + /* + * Note that job->aio_context might have been changed by calling fn, so we + * can't use a local variable to cache it. 
+ */ + aio_context_acquire(job->aio_context); + job_unref(job); return rc; } @@ -774,11 +795,11 @@ static void job_do_finalize(Job *job) assert(job && job->txn); /* prepare the transaction to complete */ - rc = job_txn_apply(job->txn, job_prepare); + rc = job_txn_apply(job, job_prepare); if (rc) { job_completed_txn_abort(job); } else { - job_txn_apply(job->txn, job_finalize_single); + job_txn_apply(job, job_finalize_single); } } @@ -824,10 +845,10 @@ static void job_completed_txn_success(Job *job) assert(other_job->ret == 0); } - job_txn_apply(txn, job_transition_to_pending); + job_txn_apply(job, job_transition_to_pending); /* If no jobs need manual finalization, automatically do so */ - if (job_txn_apply(txn, job_needs_finalize) == 0) { + if (job_txn_apply(job, job_needs_finalize) == 0) { job_do_finalize(job); } } @@ -849,9 +870,10 @@ static void job_completed(Job *job) static void job_exit(void *opaque) { Job *job = (Job *)opaque; - AioContext *ctx = job->aio_context; + AioContext *ctx; - aio_context_acquire(ctx); + job_ref(job); + aio_context_acquire(job->aio_context); /* This is a lie, we're not quiescent, but still doing the completion * callbacks. However, completion callbacks tend to involve operations that @@ -862,6 +884,14 @@ static void job_exit(void *opaque) job_completed(job); + /* + * Note that calling job_completed can move the job to a different + * aio_context, so we cannot cache from above. job_txn_apply takes care of + * acquiring the new lock, and we ref/unref to avoid job_completed freeing + * the job underneath us. 
+ */ + ctx = job->aio_context; + job_unref(job); aio_context_release(ctx); } diff --git a/tests/test-blockjob.c b/tests/test-blockjob.c index 4eeb184..7519847 100644 --- a/tests/test-blockjob.c +++ b/tests/test-blockjob.c @@ -367,7 +367,9 @@ static void test_cancel_concluded(void) aio_poll(qemu_get_aio_context(), true); assert(job->status == JOB_STATUS_PENDING); + aio_context_acquire(job->aio_context); job_finalize(job, &error_abort); + aio_context_release(job->aio_context); assert(job->status == JOB_STATUS_CONCLUDED); cancel_common(s); |