author     Xiao Guangrong <xiaoguangrong@tencent.com>  2018-03-30 15:51:24 +0800
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>  2018-04-25 18:04:09 +0100
commit     1faa5665c0f1df2eff291454a3a85625a3bc93dd
tree       0fb914c8f46744725e91391d3df90b9bb50e5e27 /migration
parent     059ff0fb29dd3a56ac2843676915efc279938c6b
migration: move some code to ram_save_host_page
Move some code from ram_save_target_page() to ram_save_host_page() to make it more readable for later patches that dramatically clean up ram_save_target_page().

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@tencent.com>
Message-Id: <20180330075128.26919-7-xiaoguangrong@tencent.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
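In practice this means the dirty-bitmap test and the unsentmap bookkeeping move out of ram_save_target_page() and into the per-host-page loop of ram_save_host_page(). A condensed sketch of that loop as it looks after this patch, taken from the second hunk below with only explanatory comments added:

    do {
        /* Skip pages that are not dirty; the helper no longer checks this. */
        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
            pss->page++;
            continue;
        }

        tmppages = ram_save_target_page(rs, pss, last_stage);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        /* The page has been sent, so drop it from the unsent map as well. */
        if (pss->block->unsentmap) {
            clear_bit(pss->page, pss->block->unsentmap);
        }
        pss->page++;
    } while ((pss->page & (pagesize_bits - 1)) &&
             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));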
Diffstat (limited to 'migration')
-rw-r--r--  migration/ram.c | 43
1 file changed, 19 insertions, 24 deletions
diff --git a/migration/ram.c b/migration/ram.c
index 79c7958..c3628b0 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1483,38 +1483,23 @@ err:
  * Returns the number of pages written
  *
  * @rs: current RAM state
- * @ms: current migration state
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
  */
 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                 bool last_stage)
 {
-    int res = 0;
-
-    /* Check the pages is dirty and if it is send it */
-    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-        /*
-         * If xbzrle is on, stop using the data compression after first
-         * round of migration even if compression is enabled. In theory,
-         * xbzrle can do better than compression.
-         */
-        if (migrate_use_compression() &&
-            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
-            res = ram_save_compressed_page(rs, pss, last_stage);
-        } else {
-            res = ram_save_page(rs, pss, last_stage);
-        }
-
-        if (res < 0) {
-            return res;
-        }
-        if (pss->block->unsentmap) {
-            clear_bit(pss->page, pss->block->unsentmap);
-        }
+    /*
+     * If xbzrle is on, stop using the data compression after first
+     * round of migration even if compression is enabled. In theory,
+     * xbzrle can do better than compression.
+     */
+    if (migrate_use_compression() &&
+        (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+        return ram_save_compressed_page(rs, pss, last_stage);
     }
 
-    return res;
+    return ram_save_page(rs, pss, last_stage);
 }
 
 /**
@@ -1543,12 +1528,22 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
     do {
+        /* Check the pages is dirty and if it is send it */
+        if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+            pss->page++;
+            continue;
+        }
+
         tmppages = ram_save_target_page(rs, pss, last_stage);
         if (tmppages < 0) {
             return tmppages;
         }
 
         pages += tmppages;
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
+        }
+
         pss->page++;
     } while ((pss->page & (pagesize_bits - 1)) &&
              offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
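For reference, ram_save_target_page() after this patch reduces to choosing how a page is encoded, since the dirty check now happens in its only caller; a condensed sketch from the first hunk above:

    static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
                                    bool last_stage)
    {
        /*
         * Prefer compression only while xbzrle cannot help (bulk stage, or
         * xbzrle disabled); otherwise send the page through the plain path.
         */
        if (migrate_use_compression() &&
            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
            return ram_save_compressed_page(rs, pss, last_stage);
        }

        return ram_save_page(rs, pss, last_stage);
    }

Keeping the per-page bookkeeping in one place in the caller is what lets the later patches mentioned in the commit message rework ram_save_target_page() without touching the bitmap handling.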