author     Paolo Bonzini <pbonzini@redhat.com>    2016-11-09 17:20:08 +0100
committer  Jeff Cody <jcody@redhat.com>           2016-11-14 22:49:26 -0500
commit     bdffb31d8eece1cbd4d88f136daccfe1f93a1bf6
tree       86371e7fba7fc7ab8530563ffa564b6f78c4b26e /block/mirror.c
parent     4e504535c16dfa66290281e704384abfaca08673
mirror: do not flush every time the disks are synced
This puts a huge strain on the disks when there are many concurrent
migrations. With this patch we only flush twice: just before issuing the
BLOCK_JOB_READY event, and just before pivoting to the destination. If
management completes the job soon after the BLOCK_JOB_READY event, the
cost of the second flush should be small anyway.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 20161109162008.27287-2-pbonzini@redhat.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
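To make the policy concrete, here is a minimal standalone sketch of the
intended flushing behaviour (plain C, not the QEMU code; names such as
flush_target() and sync_iteration() are hypothetical stand-ins for
blk_flush() and the mirror_run() loop body):

#include <stdbool.h>
#include <stdio.h>

struct job {
    bool synced;          /* READY event already emitted? */
    bool should_complete; /* management asked the job to finish */
};

/* Hypothetical stand-in for blk_flush(s->target); always succeeds here. */
static int flush_target(struct job *j)
{
    (void)j;
    printf("flush\n");
    return 0;
}

static void sync_iteration(struct job *j, long dirty)
{
    if (dirty != 0) {
        return; /* still copying: no flush at all on this pass */
    }
    if (!j->synced) {
        /* Flush #1: make the target consistent just before telling
         * management that it may complete the job. */
        if (flush_target(j) < 0) {
            return;
        }
        printf("BLOCK_JOB_READY\n");
        j->synced = true;
    }
    if (j->should_complete) {
        /* Flush #2: final flush just before pivoting to the target. */
        if (flush_target(j) == 0) {
            printf("pivot to target\n");
        }
    }
}

int main(void)
{
    struct job j = { false, false };
    sync_iteration(&j, 0);   /* first sync: flush + READY event */
    sync_iteration(&j, 0);   /* already synced: no flush */
    j.should_complete = true;
    sync_iteration(&j, 0);   /* completion: second flush, then pivot */
    return 0;
}

Before the patch, every pass with a clean dirty bitmap flushed the target;
with it, a job that is already synced issues no further flush until
management completes it.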
Diffstat (limited to 'block/mirror.c')
-rw-r--r--    block/mirror.c    40
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/block/mirror.c b/block/mirror.c
index 62ac87f..301ba92 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -615,6 +615,20 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
     return 0;
 }
 
+/* Called when going out of the streaming phase to flush the bulk of the
+ * data to the medium, or just before completing.
+ */
+static int mirror_flush(MirrorBlockJob *s)
+{
+    int ret = blk_flush(s->target);
+    if (ret < 0) {
+        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
+            s->ret = ret;
+        }
+    }
+    return ret;
+}
+
 static void coroutine_fn mirror_run(void *opaque)
 {
     MirrorBlockJob *s = opaque;
@@ -727,27 +741,23 @@ static void coroutine_fn mirror_run(void *opaque)
         should_complete = false;
         if (s->in_flight == 0 && cnt == 0) {
             trace_mirror_before_flush(s);
-            ret = blk_flush(s->target);
-            if (ret < 0) {
-                if (mirror_error_action(s, false, -ret) ==
-                    BLOCK_ERROR_ACTION_REPORT) {
-                    goto immediate_exit;
+            if (!s->synced) {
+                if (mirror_flush(s) < 0) {
+                    /* Go check s->ret. */
+                    continue;
                 }
-            } else {
                 /* We're out of the streaming phase. From now on, if the job
                  * is cancelled we will actually complete all pending I/O and
                  * report completion. This way, block-job-cancel will leave
                  * the target in a consistent state.
                  */
-                if (!s->synced) {
-                    block_job_event_ready(&s->common);
-                    s->synced = true;
-                }
-
-                should_complete = s->should_complete ||
-                    block_job_is_cancelled(&s->common);
-                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
+                block_job_event_ready(&s->common);
+                s->synced = true;
             }
+
+            should_complete = s->should_complete ||
+                block_job_is_cancelled(&s->common);
+            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
         }
 
         if (cnt == 0 && should_complete) {
@@ -765,7 +775,7 @@ static void coroutine_fn mirror_run(void *opaque)
             trace_mirror_before_drain(s, cnt);
             bdrv_drained_begin(bs);
             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
-            if (cnt > 0) {
+            if (cnt > 0 || mirror_flush(s) < 0) {
                 bdrv_drained_end(bs);
                 continue;
             }
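One subtlety in the new mirror_flush() is visible in the "Go check
s->ret." comment: on BLOCK_ERROR_ACTION_REPORT the helper records the
failure in s->ret rather than unwinding, and the caller simply continues,
relying on the top of mirror_run()'s main loop to see s->ret < 0 and bail
out. The second call site also relies on C's short-circuit evaluation:
mirror_flush() only runs once cnt == 0, i.e. when nothing remains to copy
before the pivot. A self-contained toy of the record-and-continue pattern
(hypothetical names, not the actual mirror_run() loop):

#include <stdio.h>

struct job { int ret; };

/* Stand-in for mirror_flush(): on failure it records the error in the
 * job instead of unwinding, mirroring the s->ret convention. */
static int flaky_flush(struct job *j, int fail)
{
    if (fail) {
        j->ret = -5;  /* remembered for the loop to act on */
        return -1;
    }
    return 0;
}

int main(void)
{
    struct job j = { 0 };
    int pass = 0;

    for (;;) {
        if (j.ret < 0) {          /* top-of-loop error check */
            printf("exit with error %d\n", j.ret);
            return 1;
        }
        if (flaky_flush(&j, pass == 1) < 0) {
            continue;             /* "Go check j.ret." */
        }
        printf("pass %d ok\n", pass);
        pass++;
    }
}

The pattern keeps the helper's error handling in one place while letting
every caller share the same exit path, which is why the patch can replace
the old goto immediate_exit with a plain continue.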