author     Peter Maydell <peter.maydell@linaro.org>    2018-09-24 14:35:58 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2018-09-24 14:35:58 +0100
commit     d6f71af65410d3e003ba331c5e57eddcf716cbcf
tree       c839d777f3d290b8f430e2a64aca44d1ed4271eb /block
parent     539c251b64d2cdd6aeafde1c6283688915c9aad0
parent     e21a1c9831fc80ae3f3c1affdfa43350035d8588
Merge remote-tracking branch 'remotes/xanclic/tags/pull-block-2018-08-31-v2' into staging
Block patches:
- (Block) job exit refactoring, part 1
(removing job_defer_to_main_loop())
- test-bdrv-drain leak fix
# gpg: Signature made Fri 31 Aug 2018 15:30:33 BST
# gpg: using RSA key F407DB0061D5CF40
# gpg: Good signature from "Max Reitz <mreitz@redhat.com>"
# Primary key fingerprint: 91BE B60A 30DB 3E88 57D1 1829 F407 DB00 61D5 CF40
* remotes/xanclic/tags/pull-block-2018-08-31-v2:
jobs: remove job_defer_to_main_loop
jobs: remove ret argument to job_completed; privatize it
block/backup: make function variables consistently named
jobs: utilize job_exit shim
block/mirror: utilize job_exit shim
block/commit: utilize job_exit shim
jobs: add exit shim
jobs: canonize Error object
jobs: change start callback to run callback
tests: fix bdrv-drain leak
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
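
The shape of the refactoring is the same in every driver touched below: instead of allocating a per-job *CompleteData struct and handing a completion callback to job_defer_to_main_loop(), the coroutine entry point now returns an int, the generic job layer records it in job->ret, and any main-loop cleanup moves into an optional .exit callback. What follows is a minimal, self-contained sketch of that before/after shape, not QEMU's real Job/JobDriver API; ToyJob, ToyJobDriver and toy_job_finish are illustrative stand-ins.

/* Toy model of the callback refactoring in this pull request.
 * All names here are illustrative only. */
#include <stdio.h>

typedef struct ToyJob ToyJob;

typedef struct ToyJobDriver {
    /* New-style entry point: runs the job and returns its status code. */
    int  (*run)(ToyJob *job);
    /* Optional main-loop cleanup; may inspect or override job->ret. */
    void (*exit)(ToyJob *job);
} ToyJobDriver;

struct ToyJob {
    const ToyJobDriver *driver;
    int ret;
};

/* Generic "exit shim": every job funnels through here instead of each
 * driver scheduling its own completion callback with a malloc'd ret. */
static void toy_job_finish(ToyJob *job)
{
    job->ret = job->driver->run(job);   /* was: .start + job_defer_to_main_loop() */
    if (job->driver->exit) {
        job->driver->exit(job);         /* cleanup work, main-loop context */
    }
    printf("job completed with ret=%d\n", job->ret);
}

/* A driver under the new scheme: no per-driver CompleteData struct needed. */
static int example_run(ToyJob *job)
{
    (void)job;
    return 0;                           /* success */
}

static void example_exit(ToyJob *job)
{
    if (job->ret == 0) {
        /* graph manipulation a la commit/stream/mirror would go here */
    }
}

static const ToyJobDriver example_driver = {
    .run  = example_run,
    .exit = example_exit,
};

int main(void)
{
    ToyJob job = { .driver = &example_driver };
    toy_job_finish(&job);
    return 0;
}

The per-file diffs below are essentially this transformation applied to backup, commit, create, mirror and stream.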
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c  81
-rw-r--r--  block/commit.c  29
-rw-r--r--  block/create.c  19
-rw-r--r--  block/mirror.c  39
-rw-r--r--  block/stream.c  29
5 files changed, 76 insertions(+), 121 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 8630d32..4d084f6 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -380,18 +380,6 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
     }
 }
 
-typedef struct {
-    int ret;
-} BackupCompleteData;
-
-static void backup_complete(Job *job, void *opaque)
-{
-    BackupCompleteData *data = opaque;
-
-    job_completed(job, data->ret, NULL);
-    g_free(data);
-}
-
 static bool coroutine_fn yield_and_check(BackupBlockJob *job)
 {
     uint64_t delay_ns;
@@ -480,60 +468,59 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
     bdrv_dirty_iter_free(dbi);
 }
 
-static void coroutine_fn backup_run(void *opaque)
+static int coroutine_fn backup_run(Job *job, Error **errp)
 {
-    BackupBlockJob *job = opaque;
-    BackupCompleteData *data;
-    BlockDriverState *bs = blk_bs(job->common.blk);
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
+    BlockDriverState *bs = blk_bs(s->common.blk);
     int64_t offset, nb_clusters;
     int ret = 0;
 
-    QLIST_INIT(&job->inflight_reqs);
-    qemu_co_rwlock_init(&job->flush_rwlock);
+    QLIST_INIT(&s->inflight_reqs);
+    qemu_co_rwlock_init(&s->flush_rwlock);
 
-    nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
-    job_progress_set_remaining(&job->common.job, job->len);
+    nb_clusters = DIV_ROUND_UP(s->len, s->cluster_size);
+    job_progress_set_remaining(job, s->len);
 
-    job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
-    if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
-        backup_incremental_init_copy_bitmap(job);
+    s->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
+    if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+        backup_incremental_init_copy_bitmap(s);
     } else {
-        hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+        hbitmap_set(s->copy_bitmap, 0, nb_clusters);
    }
 
-    job->before_write.notify = backup_before_write_notify;
-    bdrv_add_before_write_notifier(bs, &job->before_write);
+    s->before_write.notify = backup_before_write_notify;
+    bdrv_add_before_write_notifier(bs, &s->before_write);
 
-    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
+    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
         /* All bits are set in copy_bitmap to allow any cluster to be copied.
          * This does not actually require them to be copied. */
-        while (!job_is_cancelled(&job->common.job)) {
+        while (!job_is_cancelled(job)) {
             /* Yield until the job is cancelled. We just let our before_write
              * notify callback service CoW requests. */
-            job_yield(&job->common.job);
+            job_yield(job);
         }
-    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
-        ret = backup_run_incremental(job);
+    } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+        ret = backup_run_incremental(s);
     } else {
         /* Both FULL and TOP SYNC_MODE's require copying.. */
-        for (offset = 0; offset < job->len;
-             offset += job->cluster_size) {
+        for (offset = 0; offset < s->len;
+             offset += s->cluster_size) {
             bool error_is_read;
             int alloced = 0;
 
-            if (yield_and_check(job)) {
+            if (yield_and_check(s)) {
                 break;
             }
 
-            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
+            if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
                 int i;
                 int64_t n;
 
                 /* Check to see if these blocks are already in the
                  * backing file. */
-                for (i = 0; i < job->cluster_size;) {
+                for (i = 0; i < s->cluster_size;) {
                     /* bdrv_is_allocated() only returns true/false based
                      * on the first set of sectors it comes across that
                      * are are all in the same state.
@@ -542,7 +529,7 @@ static void coroutine_fn backup_run(void *opaque)
                      * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs, offset + i,
-                                          job->cluster_size - i, &n);
+                                          s->cluster_size - i, &n);
                    i += n;
 
                    if (alloced || n == 0) {
@@ -560,33 +547,31 @@ static void coroutine_fn backup_run(void *opaque)
             if (alloced < 0) {
                 ret = alloced;
             } else {
-                ret = backup_do_cow(job, offset, job->cluster_size,
+                ret = backup_do_cow(s, offset, s->cluster_size,
                                     &error_is_read, false);
             }
             if (ret < 0) {
                 /* Depending on error action, fail now or retry cluster */
                 BlockErrorAction action =
-                    backup_error_action(job, error_is_read, -ret);
+                    backup_error_action(s, error_is_read, -ret);
                 if (action == BLOCK_ERROR_ACTION_REPORT) {
                     break;
                 } else {
-                    offset -= job->cluster_size;
+                    offset -= s->cluster_size;
                     continue;
                 }
             }
         }
     }
 
-    notifier_with_return_remove(&job->before_write);
+    notifier_with_return_remove(&s->before_write);
 
     /* wait until pending backup_do_cow() calls have completed */
-    qemu_co_rwlock_wrlock(&job->flush_rwlock);
-    qemu_co_rwlock_unlock(&job->flush_rwlock);
-    hbitmap_free(job->copy_bitmap);
+    qemu_co_rwlock_wrlock(&s->flush_rwlock);
+    qemu_co_rwlock_unlock(&s->flush_rwlock);
+    hbitmap_free(s->copy_bitmap);
 
-    data = g_malloc(sizeof(*data));
-    data->ret = ret;
-    job_defer_to_main_loop(&job->common.job, backup_complete, data);
+    return ret;
 }
 
 static const BlockJobDriver backup_job_driver = {
@@ -596,7 +581,7 @@ static const BlockJobDriver backup_job_driver = {
         .free = block_job_free,
         .user_resume = block_job_user_resume,
         .drain = block_job_drain,
-        .start = backup_run,
+        .run = backup_run,
         .commit = backup_commit,
         .abort = backup_abort,
         .clean = backup_clean,
diff --git a/block/commit.c b/block/commit.c
index eb41457..da69165 100644
--- a/block/commit.c
+++ b/block/commit.c
@@ -68,19 +68,13 @@ static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
     return 0;
 }
 
-typedef struct {
-    int ret;
-} CommitCompleteData;
-
-static void commit_complete(Job *job, void *opaque)
+static void commit_exit(Job *job)
 {
     CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
     BlockJob *bjob = &s->common;
-    CommitCompleteData *data = opaque;
     BlockDriverState *top = blk_bs(s->top);
     BlockDriverState *base = blk_bs(s->base);
     BlockDriverState *commit_top_bs = s->commit_top_bs;
-    int ret = data->ret;
     bool remove_commit_top_bs = false;
 
     /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
@@ -91,10 +85,10 @@ static void commit_complete(Job *job, void *opaque)
      * the normal backing chain can be restored. */
     blk_unref(s->base);
 
-    if (!job_is_cancelled(job) && ret == 0) {
+    if (!job_is_cancelled(job) && job->ret == 0) {
         /* success */
-        ret = bdrv_drop_intermediate(s->commit_top_bs, base,
-                                     s->backing_file_str);
+        job->ret = bdrv_drop_intermediate(s->commit_top_bs, base,
+                                          s->backing_file_str);
     } else {
         /* XXX Can (or should) we somehow keep 'consistent read' blocked even
          * after the failed/cancelled commit job is gone? If we already wrote
@@ -117,9 +111,6 @@ static void commit_complete(Job *job, void *opaque)
      * bdrv_set_backing_hd() to fail. */
     block_job_remove_all_bdrv(bjob);
 
-    job_completed(job, ret, NULL);
-    g_free(data);
-
     /* If bdrv_drop_intermediate() didn't already do that, remove the commit
      * filter driver from the backing chain. Do this as the final step so that
      * the 'consistent read' permission can be granted. */
@@ -134,10 +125,9 @@ static void commit_complete(Job *job, void *opaque)
     bdrv_unref(top);
 }
 
-static void coroutine_fn commit_run(void *opaque)
+static int coroutine_fn commit_run(Job *job, Error **errp)
 {
-    CommitBlockJob *s = opaque;
-    CommitCompleteData *data;
+    CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
     int64_t offset;
     uint64_t delay_ns = 0;
     int ret = 0;
@@ -210,9 +200,7 @@ static void coroutine_fn commit_run(void *opaque)
 out:
     qemu_vfree(buf);
 
-    data = g_malloc(sizeof(*data));
-    data->ret = ret;
-    job_defer_to_main_loop(&s->common.job, commit_complete, data);
+    return ret;
 }
 
 static const BlockJobDriver commit_job_driver = {
@@ -222,7 +210,8 @@ static const BlockJobDriver commit_job_driver = {
         .free = block_job_free,
         .user_resume = block_job_user_resume,
         .drain = block_job_drain,
-        .start = commit_run,
+        .run = commit_run,
+        .exit = commit_exit,
     },
 };
 
diff --git a/block/create.c b/block/create.c
index 915cd41..9534121 100644
--- a/block/create.c
+++ b/block/create.c
@@ -34,33 +34,26 @@ typedef struct BlockdevCreateJob {
     Job common;
     BlockDriver *drv;
     BlockdevCreateOptions *opts;
-    int ret;
-    Error *err;
 } BlockdevCreateJob;
 
-static void blockdev_create_complete(Job *job, void *opaque)
+static int coroutine_fn blockdev_create_run(Job *job, Error **errp)
 {
     BlockdevCreateJob *s = container_of(job, BlockdevCreateJob, common);
-
-    job_completed(job, s->ret, s->err);
-}
-
-static void coroutine_fn blockdev_create_run(void *opaque)
-{
-    BlockdevCreateJob *s = opaque;
+    int ret;
 
     job_progress_set_remaining(&s->common, 1);
-    s->ret = s->drv->bdrv_co_create(s->opts, &s->err);
+    ret = s->drv->bdrv_co_create(s->opts, errp);
     job_progress_update(&s->common, 1);
 
     qapi_free_BlockdevCreateOptions(s->opts);
-    job_defer_to_main_loop(&s->common, blockdev_create_complete, NULL);
+
+    return ret;
 }
 
 static const JobDriver blockdev_create_job_driver = {
     .instance_size = sizeof(BlockdevCreateJob),
     .job_type      = JOB_TYPE_CREATE,
-    .start         = blockdev_create_run,
+    .run           = blockdev_create_run,
 };
 
 void qmp_blockdev_create(const char *job_id, BlockdevCreateOptions *options,
diff --git a/block/mirror.c b/block/mirror.c
index 6cc10df..b8941db 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -607,26 +607,22 @@ static void mirror_wait_for_all_io(MirrorBlockJob *s)
     }
 }
 
-typedef struct {
-    int ret;
-} MirrorExitData;
-
-static void mirror_exit(Job *job, void *opaque)
+static void mirror_exit(Job *job)
 {
     MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
     BlockJob *bjob = &s->common;
-    MirrorExitData *data = opaque;
     MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
     AioContext *replace_aio_context = NULL;
     BlockDriverState *src = s->mirror_top_bs->backing->bs;
     BlockDriverState *target_bs = blk_bs(s->target);
     BlockDriverState *mirror_top_bs = s->mirror_top_bs;
     Error *local_err = NULL;
+    int ret = job->ret;
 
     bdrv_release_dirty_bitmap(src, s->dirty_bitmap);
 
-    /* Make sure that the source BDS doesn't go away before we called
-     * job_completed(). */
+    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
+     * before we can call bdrv_drained_end */
     bdrv_ref(src);
     bdrv_ref(mirror_top_bs);
     bdrv_ref(target_bs);
@@ -652,7 +648,7 @@ static void mirror_exit(Job *job, void *opaque)
             bdrv_set_backing_hd(target_bs, backing, &local_err);
             if (local_err) {
                 error_report_err(local_err);
-                data->ret = -EPERM;
+                ret = -EPERM;
             }
         }
     }
@@ -662,7 +658,7 @@ static void mirror_exit(Job *job, void *opaque)
         aio_context_acquire(replace_aio_context);
     }
 
-    if (s->should_complete && data->ret == 0) {
+    if (s->should_complete && ret == 0) {
         BlockDriverState *to_replace = src;
         if (s->to_replace) {
             to_replace = s->to_replace;
@@ -679,7 +675,7 @@ static void mirror_exit(Job *job, void *opaque)
         bdrv_drained_end(target_bs);
         if (local_err) {
             error_report_err(local_err);
-            data->ret = -EPERM;
+            ret = -EPERM;
         }
     }
     if (s->to_replace) {
@@ -710,12 +706,12 @@ static void mirror_exit(Job *job, void *opaque)
     blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);
 
     bs_opaque->job = NULL;
-    job_completed(job, data->ret, NULL);
 
-    g_free(data);
     bdrv_drained_end(src);
     bdrv_unref(mirror_top_bs);
     bdrv_unref(src);
+
+    job->ret = ret;
 }
 
 static void mirror_throttle(MirrorBlockJob *s)
@@ -812,10 +808,9 @@ static int mirror_flush(MirrorBlockJob *s)
     return ret;
 }
 
-static void coroutine_fn mirror_run(void *opaque)
+static int coroutine_fn mirror_run(Job *job, Error **errp)
 {
-    MirrorBlockJob *s = opaque;
-    MirrorExitData *data;
+    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
     BlockDriverState *bs = s->mirror_top_bs->backing->bs;
     BlockDriverState *target_bs = blk_bs(s->target);
     bool need_drain = true;
@@ -1035,13 +1030,11 @@ immediate_exit:
     g_free(s->in_flight_bitmap);
     bdrv_dirty_iter_free(s->dbi);
 
-    data = g_malloc(sizeof(*data));
-    data->ret = ret;
-
     if (need_drain) {
         bdrv_drained_begin(bs);
     }
-    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
+
+    return ret;
 }
 
 static void mirror_complete(Job *job, Error **errp)
@@ -1138,7 +1131,8 @@ static const BlockJobDriver mirror_job_driver = {
         .free = block_job_free,
         .user_resume = block_job_user_resume,
         .drain = block_job_drain,
-        .start = mirror_run,
+        .run = mirror_run,
+        .exit = mirror_exit,
         .pause = mirror_pause,
         .complete = mirror_complete,
     },
@@ -1154,7 +1148,8 @@ static const BlockJobDriver commit_active_job_driver = {
         .free = block_job_free,
         .user_resume = block_job_user_resume,
         .drain = block_job_drain,
-        .start = mirror_run,
+        .run = mirror_run,
+        .exit = mirror_exit,
         .pause = mirror_pause,
         .complete = mirror_complete,
     },
diff --git a/block/stream.c b/block/stream.c
index 9264b68..67e1e72 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -54,20 +54,16 @@ static int coroutine_fn stream_populate(BlockBackend *blk,
     return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
 }
 
-typedef struct {
-    int ret;
-} StreamCompleteData;
-
-static void stream_complete(Job *job, void *opaque)
+static void stream_exit(Job *job)
 {
     StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
     BlockJob *bjob = &s->common;
-    StreamCompleteData *data = opaque;
     BlockDriverState *bs = blk_bs(bjob->blk);
     BlockDriverState *base = s->base;
     Error *local_err = NULL;
+    int ret = job->ret;
 
-    if (!job_is_cancelled(job) && bs->backing && data->ret == 0) {
+    if (!job_is_cancelled(job) && bs->backing && ret == 0) {
         const char *base_id = NULL, *base_fmt = NULL;
         if (base) {
             base_id = s->backing_file_str;
@@ -75,11 +71,11 @@ static void stream_complete(Job *job, void *opaque)
                 base_fmt = base->drv->format_name;
             }
         }
-        data->ret = bdrv_change_backing_file(bs, base_id, base_fmt);
+        ret = bdrv_change_backing_file(bs, base_id, base_fmt);
         bdrv_set_backing_hd(bs, base, &local_err);
         if (local_err) {
             error_report_err(local_err);
-            data->ret = -EPERM;
+            ret = -EPERM;
             goto out;
         }
     }
@@ -93,14 +89,12 @@ out:
     }
 
     g_free(s->backing_file_str);
-    job_completed(job, data->ret, NULL);
-    g_free(data);
+    job->ret = ret;
 }
 
-static void coroutine_fn stream_run(void *opaque)
+static int coroutine_fn stream_run(Job *job, Error **errp)
 {
-    StreamBlockJob *s = opaque;
-    StreamCompleteData *data;
+    StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
     BlockBackend *blk = s->common.blk;
     BlockDriverState *bs = blk_bs(blk);
     BlockDriverState *base = s->base;
@@ -203,9 +197,7 @@ static void coroutine_fn stream_run(void *opaque)
 
 out:
     /* Modify backing chain and close BDSes in main loop */
-    data = g_malloc(sizeof(*data));
-    data->ret = ret;
-    job_defer_to_main_loop(&s->common.job, stream_complete, data);
+    return ret;
 }
 
 static const BlockJobDriver stream_job_driver = {
@@ -213,7 +205,8 @@ static const BlockJobDriver stream_job_driver = {
         .instance_size = sizeof(StreamBlockJob),
         .job_type      = JOB_TYPE_STREAM,
         .free          = block_job_free,
-        .start         = stream_run,
+        .run           = stream_run,
+        .exit          = stream_exit,
         .user_resume   = block_job_user_resume,
         .drain         = block_job_drain,
     },
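
One mechanical change repeated across all five files above: because .run receives the generic Job * rather than the driver's own opaque pointer, each driver recovers its state struct with container_of(job, FooBlockJob, common.job). A standalone illustration of that idiom, using made-up struct names and a local container_of definition rather than QEMU's own from osdep.h:

#include <stddef.h>
#include <stdio.h>

/* Local stand-in for the usual container_of macro. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct Job {
    int ret;
} Job;

/* Hypothetical driver state embedding the generic Job, the way
 * e.g. BackupBlockJob reaches its Job through common.job. */
typedef struct FooBlockJob {
    Job common_job;
    int cluster_size;
} FooBlockJob;

static int foo_run(Job *job)
{
    /* Recover the embedding struct from the generic pointer. */
    FooBlockJob *s = container_of(job, FooBlockJob, common_job);
    printf("cluster_size = %d\n", s->cluster_size);
    return 0;
}

int main(void)
{
    FooBlockJob s = { .common_job = { .ret = 0 }, .cluster_size = 65536 };
    return foo_run(&s.common_job);
}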