author     Stefan Hajnoczi <stefanha@redhat.com>  2016-06-16 17:56:29 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>  2016-06-20 14:25:41 +0100
commit     5ab4b69ce29908b327a91966dc78ea0fd7424075
tree       a7b7d329f19dc31dd39126b36798f4ad83b1e963
parent     565ac01f8d35236844dd0257a185f81425cd4b6a
backup: follow AioContext change gracefully
Move s->target to the new AioContext when there is an AioContext change.

The backup_run() coroutine does not use asynchronous I/O so there is no need
to wait for in-flight requests in a BlockJobDriver->pause() callback.

Guest writes are intercepted by the backup job.  Treat them as guest activity
and do it even while the job is paused.  This is necessary since the only
alternative would be to fail a job that experienced guest writes during pause
once the job is resumed.  In practice the guest writes don't interfere with
AioContext switching since bdrv_drain() is used by bdrv_set_aio_context().

Loops already contain pause points because of block_job_sleep_ns() calls in
the yield_and_check() helper function.  It is necessary to convert a raw
qemu_coroutine_yield() to block_job_yield() so the MIRROR_SYNC_MODE_NONE case
can pause.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 1466096189-6477-9-git-send-email-stefanha@redhat.com
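As a reading aid for the diff below: the commit relies on the ordering that
bdrv_set_aio_context() drains in-flight requests before the new AioContext is
attached, at which point the job's new attached_aio_context callback moves
s->target.  The following self-contained C model sketches that ordering only;
AioCtx, Job, set_aio_context and the other names here are illustrative
stand-ins invented for this note, not QEMU's API.

#include <stdio.h>

/* Illustrative stand-ins for an AioContext and a backup job. */
typedef struct AioCtx { const char *name; } AioCtx;

typedef struct Job {
    AioCtx *target_ctx;   /* context the copy target currently runs in */
    void (*attached_aio_context)(struct Job *job, AioCtx *new_ctx);
} Job;

/* Model of the job callback added in this patch: repoint the target. */
static void model_backup_attached_aio_context(Job *job, AioCtx *new_ctx)
{
    job->target_ctx = new_ctx;
    printf("target now follows context %s\n", new_ctx->name);
}

/* Model of the switch sequence described in the commit message:
 * drain first, then notify attached users. */
static void set_aio_context(Job *job, AioCtx *new_ctx)
{
    /* 1. Drain: no requests (and so no copy-on-write writes triggered by
     *    the before_write notifier) are in flight past this point. */
    printf("drain: all in-flight requests completed\n");

    /* 2. Notify attached users, here the backup job's callback. */
    job->attached_aio_context(job, new_ctx);
}

int main(void)
{
    AioCtx main_ctx = { "main" }, iothread_ctx = { "iothread0" };
    Job job = { &main_ctx, model_backup_attached_aio_context };

    set_aio_context(&job, &iothread_ctx);
    return 0;
}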
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c  22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index feeb9f8..581269b 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -246,12 +246,20 @@ static void backup_abort(BlockJob *job)
     }
 }
 
+static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
+{
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+
+    blk_set_aio_context(s->target, aio_context);
+}
+
 static const BlockJobDriver backup_job_driver = {
-    .instance_size  = sizeof(BackupBlockJob),
-    .job_type       = BLOCK_JOB_TYPE_BACKUP,
-    .set_speed      = backup_set_speed,
-    .commit         = backup_commit,
-    .abort          = backup_abort,
+    .instance_size          = sizeof(BackupBlockJob),
+    .job_type               = BLOCK_JOB_TYPE_BACKUP,
+    .set_speed              = backup_set_speed,
+    .commit                 = backup_commit,
+    .abort                  = backup_abort,
+    .attached_aio_context   = backup_attached_aio_context,
 };
 
 static BlockErrorAction backup_error_action(BackupBlockJob *job,
@@ -392,9 +400,7 @@ static void coroutine_fn backup_run(void *opaque)
         while (!block_job_is_cancelled(&job->common)) {
             /* Yield until the job is cancelled. We just let our before_write
              * notify callback service CoW requests. */
-            job->common.busy = false;
-            qemu_coroutine_yield();
-            job->common.busy = true;
+            block_job_yield(&job->common);
         }
     } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
         ret = backup_run_incremental(job);
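The second hunk replaces the hand-rolled busy-flag dance with
block_job_yield().  Per the commit message, the point is that the raw yield
never gave the job a chance to pause while it idled in MIRROR_SYNC_MODE_NONE.
The self-contained model below contrasts the two yield styles as the message
describes them; ModelJob and both helper functions are invented for this note
and are not QEMU APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative job state; none of these names are QEMU's. */
typedef struct {
    bool busy;
    bool pause_requested;
    bool paused;
} ModelJob;

/* The old pattern from the removed lines: mark the job idle and park.
 * A pause request made while the job sits here is never acted on. */
static void raw_yield(ModelJob *job)
{
    job->busy = false;
    /* ... coroutine parked here until re-entered ... */
    job->busy = true;
}

/* The behaviour block_job_yield() stands in for here: yielding also acts
 * as a pause point, so a pending pause request takes effect. */
static void yield_with_pause_point(ModelJob *job)
{
    job->busy = false;
    if (job->pause_requested) {
        job->paused = true;
        printf("pause point: job reports itself paused while yielded\n");
    }
    /* ... coroutine parked here until resumed ... */
    job->paused = false;
    job->busy = true;
}

int main(void)
{
    ModelJob job = { .busy = true, .pause_requested = true };

    raw_yield(&job);
    printf("raw yield: paused=%d (pause request ignored)\n", job.paused);

    yield_with_pause_point(&job);
    printf("pause-point yield: the pending request was honoured\n");
    return 0;
}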