path: root/block/stream.c
author    John Snow <jsnow@redhat.com>    2018-08-29 21:57:31 -0400
committer Max Reitz <mreitz@redhat.com>   2018-08-31 16:28:33 +0200
commit    eb23654dbe43b549ea2a9ebff9d8edf544d34a73 (patch)
tree      4801dd246bf808450c131683b57fd50c1f8dbcaf /block/stream.c
parent    7b508f6b7a38a8d9729772fa6e525da883fb120b (diff)
jobs: utilize job_exit shim
Utilize the job_exit shim by not calling job_defer_to_main_loop, and where
applicable, converting the deferred callback into the job_exit callback.

This converts backup, stream, create, and the unit tests all at once. Most
of these jobs do not see any changes to the order in which they clean up
their resources, except the test-blockjob-txn test, which now puts down its
bs before job_completed is called.

This is safe for the same reason the reordering in the mirror job is safe,
because job_completed no longer runs under two locks, making the unref safe
even if it causes a flush.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20180830015734.19765-7-jsnow@redhat.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
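To illustrate the callback shape this commit converts to, here is a minimal
standalone sketch of the pattern, assuming nothing beyond the C standard
library; Job, JobDriver, run_job and the stream_* stubs below are hypothetical
stand-ins for the real job layer and its job_exit shim, not the actual QEMU
API.

/* Standalone model of the conversion above; NOT QEMU code.
 * run_job() plays the role of the common-layer job_exit shim. */
#include <stdio.h>

typedef struct Job Job;

typedef struct JobDriver {
    int  (*run)(Job *job);      /* does the work, returns 0 or -errno */
    void (*exit)(Job *job);     /* cleanup; may read and update job->ret */
} JobDriver;

struct Job {
    int ret;                    /* result recorded by the common layer */
    const JobDriver *driver;
};

/* After .run returns, the generic code stores the result in job->ret and
 * then calls .exit, so drivers no longer allocate a completion struct or
 * schedule the completion callback themselves. */
static void run_job(Job *job)
{
    job->ret = job->driver->run(job);
    if (job->driver->exit) {
        job->driver->exit(job);
    }
}

static int stream_run_stub(Job *job)
{
    return 0;                   /* pretend the copy loop succeeded */
}

static void stream_exit_stub(Job *job)
{
    /* a driver may adjust job->ret here, much as stream_exit in the patch
     * overwrites it with -EPERM when changing the backing file fails */
    printf("stream finished with ret=%d\n", job->ret);
}

int main(void)
{
    static const JobDriver stream_driver = {
        .run  = stream_run_stub,
        .exit = stream_exit_stub,
    };
    Job job = { .ret = 0, .driver = &stream_driver };

    run_job(&job);
    return job.ret;
}

In the real series, the job_exit shim invokes the driver's .exit callback in
the main loop once the coroutine's .run has returned, which is why the new
stream_exit in the patch below only reads and updates job->ret instead of
allocating a StreamCompleteData and calling job_defer_to_main_loop.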
Diffstat (limited to 'block/stream.c')
-rw-r--r--  block/stream.c | 22
1 file changed, 7 insertions(+), 15 deletions(-)
diff --git a/block/stream.c b/block/stream.c
index 26a7753..67e1e72 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -54,20 +54,16 @@ static int coroutine_fn stream_populate(BlockBackend *blk,
return blk_co_preadv(blk, offset, qiov.size, &qiov, BDRV_REQ_COPY_ON_READ);
}
-typedef struct {
- int ret;
-} StreamCompleteData;
-
-static void stream_complete(Job *job, void *opaque)
+static void stream_exit(Job *job)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
BlockJob *bjob = &s->common;
- StreamCompleteData *data = opaque;
BlockDriverState *bs = blk_bs(bjob->blk);
BlockDriverState *base = s->base;
Error *local_err = NULL;
+ int ret = job->ret;
- if (!job_is_cancelled(job) && bs->backing && data->ret == 0) {
+ if (!job_is_cancelled(job) && bs->backing && ret == 0) {
const char *base_id = NULL, *base_fmt = NULL;
if (base) {
base_id = s->backing_file_str;
@@ -75,11 +71,11 @@ static void stream_complete(Job *job, void *opaque)
base_fmt = base->drv->format_name;
}
}
- data->ret = bdrv_change_backing_file(bs, base_id, base_fmt);
+ ret = bdrv_change_backing_file(bs, base_id, base_fmt);
bdrv_set_backing_hd(bs, base, &local_err);
if (local_err) {
error_report_err(local_err);
- data->ret = -EPERM;
+ ret = -EPERM;
goto out;
}
}
@@ -93,14 +89,12 @@ out:
}
g_free(s->backing_file_str);
- job_completed(job, data->ret);
- g_free(data);
+ job->ret = ret;
}
static int coroutine_fn stream_run(Job *job, Error **errp)
{
StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
- StreamCompleteData *data;
BlockBackend *blk = s->common.blk;
BlockDriverState *bs = blk_bs(blk);
BlockDriverState *base = s->base;
@@ -203,9 +197,6 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
out:
/* Modify backing chain and close BDSes in main loop */
- data = g_malloc(sizeof(*data));
- data->ret = ret;
- job_defer_to_main_loop(&s->common.job, stream_complete, data);
return ret;
}
@@ -215,6 +206,7 @@ static const BlockJobDriver stream_job_driver = {
.job_type = JOB_TYPE_STREAM,
.free = block_job_free,
.run = stream_run,
+ .exit = stream_exit,
.user_resume = block_job_user_resume,
.drain = block_job_drain,
},