author     Stefan Hajnoczi <stefanha@redhat.com>    2017-05-22 14:57:04 +0100
committer  Kevin Wolf <kwolf@redhat.com>            2017-06-26 14:51:13 +0200
commit     8649f2f9b2d83b627199babc52b454db136e253c (patch)
tree       1f92ce37f7e4cf8074b326114e6d2a23b641c9df /migration/savevm.c
parent     17e2a4a47d46dc9c33d5946cbdc1ceb15e34b5ac (diff)
migration: use bdrv_drain_all_begin/end() instead of bdrv_drain_all()
blk/bdrv_drain_all() only takes effect for a single instant and then resumes block jobs, guest devices, and other external clients like the NBD server. This can be handy when performing a synchronous drain before terminating the program, for example.

Monitor commands usually need to quiesce I/O across an entire code region, so blk/bdrv_drain_all() is not suitable. They must use bdrv_drain_all_begin/end() to mark the region. This prevents new I/O requests from slipping in or, worse, block jobs completing and modifying the graph.

I audited other blk/bdrv_drain_all() callers but did not find anything that needs a similar fix. This patch fixes the savevm/loadvm commands. Although I haven't encountered a real-world issue, this makes the code safer.

Suggested-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
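As a quick illustration of the pattern described above, here is a minimal sketch of a monitor-style operation bracketed by a drained section. bdrv_drain_all_begin/end() are the QEMU block-layer calls named in this commit; monitor_snapshot_op() and do_snapshot_work() are hypothetical placeholders, not functions from the tree.

/* Sketch only: shows the begin/end bracketing described in the commit
 * message. The two helper names below are hypothetical. */
static int monitor_snapshot_op(void)
{
    int ret;

    /* A one-shot bdrv_drain_all() lets I/O resume immediately afterwards;
     * begin/end keeps guest devices, block jobs, and external clients such
     * as the NBD server quiesced for the whole region. */
    bdrv_drain_all_begin();

    ret = do_snapshot_work();   /* hypothetical: work that must not race with I/O */

    /* Every exit path must reach the matching end() call. */
    bdrv_drain_all_end();
    return ret;
}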
Diffstat (limited to 'migration/savevm.c')
-rw-r--r--  migration/savevm.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/migration/savevm.c b/migration/savevm.c
index 5846d9c..b08df04 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2107,6 +2107,8 @@ int save_snapshot(const char *name, Error **errp)
     }
     vm_stop(RUN_STATE_SAVE_VM);
 
+    bdrv_drain_all_begin();
+
     aio_context_acquire(aio_context);
 
     memset(sn, 0, sizeof(*sn));
@@ -2165,6 +2167,9 @@ int save_snapshot(const char *name, Error **errp)
     if (aio_context) {
         aio_context_release(aio_context);
     }
+
+    bdrv_drain_all_end();
+
     if (saved_vm_running) {
         vm_start();
     }
@@ -2273,20 +2278,21 @@ int load_snapshot(const char *name, Error **errp)
     }
 
     /* Flush all IO requests so they don't interfere with the new state. */
-    bdrv_drain_all();
+    bdrv_drain_all_begin();
 
     ret = bdrv_all_goto_snapshot(name, &bs);
     if (ret < 0) {
         error_setg(errp, "Error %d while activating snapshot '%s' on '%s'",
                    ret, name, bdrv_get_device_name(bs));
-        return ret;
+        goto err_drain;
     }
 
     /* restore the VM state */
     f = qemu_fopen_bdrv(bs_vm_state, 0);
     if (!f) {
         error_setg(errp, "Could not open VM state file");
-        return -EINVAL;
+        ret = -EINVAL;
+        goto err_drain;
     }
 
     qemu_system_reset(SHUTDOWN_CAUSE_NONE);
@@ -2296,6 +2302,8 @@ int load_snapshot(const char *name, Error **errp)
     ret = qemu_loadvm_state(f);
     aio_context_release(aio_context);
 
+    bdrv_drain_all_end();
+
     migration_incoming_state_destroy();
     if (ret < 0) {
         error_setg(errp, "Error %d while loading VM state", ret);
@@ -2303,6 +2311,10 @@ int load_snapshot(const char *name, Error **errp)
     }
 
     return 0;
+
+err_drain:
+    bdrv_drain_all_end();
+    return ret;
 }
 
 void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
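For readability, a condensed sketch of load_snapshot() with this patch applied, abridged from the hunks above; declarations, error messages, and AioContext handling are omitted.

/* Abridged view of load_snapshot() after this patch: every failure taken
 * after bdrv_drain_all_begin() now routes through err_drain, so the drained
 * section is always ended. (Declarations of bs, bs_vm_state, f omitted.) */
int load_snapshot(const char *name, Error **errp)
{
    int ret;

    /* ... snapshot validation, bs_vm_state lookup ... */

    bdrv_drain_all_begin();

    ret = bdrv_all_goto_snapshot(name, &bs);
    if (ret < 0) {
        goto err_drain;                 /* was: return ret; */
    }

    f = qemu_fopen_bdrv(bs_vm_state, 0);
    if (!f) {
        ret = -EINVAL;
        goto err_drain;                 /* was: return -EINVAL; */
    }

    /* ... qemu_system_reset(), qemu_loadvm_state(f) ... */

    bdrv_drain_all_end();               /* normal path ends the drained section */

    /* ... report a qemu_loadvm_state() failure, if any ... */
    return 0;

err_drain:
    bdrv_drain_all_end();               /* early exits taken before loadvm ran */
    return ret;
}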