author | Dr. David Alan Gilbert <dgilbert@redhat.com> | 2014-02-13 19:44:45 +0000
---|---|---
committer | Juan Quintela <quintela@redhat.com> | 2014-02-25 14:30:28 +0100
commit | 6d3cb1f970ee85361618f7ff02869180394e012d |
tree | ba96f77f01344e8efc15eea039358fc4f3bbe719 | /arch_init.c
parent | 24a370ef2351dc596a7e47508b952ddfba79ef94 |
Fix two XBZRLE corruption issues
Push zero'd pages into the XBZRLE cache: a page that was cached by
XBZRLE, zero'd, and then XBZRLE'd again was being compared against a
stale cache value.

Don't use 'qemu_put_buffer_async' to put pages from the XBZRLE cache,
since the cache might change before the data hits the wire.
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
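
The first fix is easiest to see with a toy model. The sketch below is not QEMU code — a real XBZRLE encoder run-length encodes the delta, and `PAGE`, `xbzrle_send()` and friends are invented for illustration — but it shows the corruption mechanism: bytes that happen to match a stale cache entry are never re-sent, even though the receiver's copy was zeroed in between.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE 16                     /* toy page size */

/* Toy "XBZRLE": send the new value of every byte that differs from the
 * sender's cached copy; bytes that match the cache are assumed to be
 * correct on the receiver already. */
static void xbzrle_send(uint8_t *dst, const uint8_t *cur, uint8_t *cache)
{
    for (int i = 0; i < PAGE; i++) {
        if (cur[i] != cache[i]) {
            dst[i] = cur[i];        /* receiver applies the delta */
        }
    }
    memcpy(cache, cur, PAGE);       /* sender refreshes its cache */
}

int main(void)
{
    uint8_t cur[PAGE], cache[PAGE], dst[PAGE];

    /* Round 1: a 0xAA page is sent whole and cached on the sender. */
    memset(cur, 0xAA, PAGE);
    memcpy(cache, cur, PAGE);
    memcpy(dst, cur, PAGE);

    /* Round 2: the guest zeroes the page.  It travels via the cheap
     * zero-page path, so the receiver is zeroed -- but without the fix
     * the XBZRLE cache still holds 0xAA. */
    memset(cur, 0, PAGE);
    memset(dst, 0, PAGE);
    /* memset(cache, 0, PAGE);         <- the fix, toy version */

    /* Round 3: the guest rewrites half the page with 0xAA.  Those
     * bytes match the STALE cache, so they are never sent, yet the
     * receiver still holds zeros there. */
    memset(cur, 0xAA, PAGE / 2);
    xbzrle_send(dst, cur, cache);

    puts(memcmp(dst, cur, PAGE) ? "receiver CORRUPTED" : "receiver OK");
    return 0;
}
```

Uncommenting the `memset(cache, 0, PAGE)` line — the toy counterpart of the patch's `xbzrle_cache_zero_page()` — keeps the sender's cache and the receiver's page in step, and the final comparison passes.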
Diffstat (limited to 'arch_init.c')
-rw-r--r-- | arch_init.c | 64
1 file changed, 49 insertions(+), 15 deletions(-)
```diff
diff --git a/arch_init.c b/arch_init.c
index 80574a0..fe17279 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -122,7 +122,6 @@ static void check_guest_throttling(void);
 #define RAM_SAVE_FLAG_XBZRLE   0x40
 /* 0x80 is reserved in migration.h start with 0x100 next */
 
-
 static struct defconfig_file {
     const char *filename;
     /* Indicates it is an user config file (disabled by -no-user-config) */
@@ -133,6 +132,7 @@ static struct defconfig_file {
     { NULL }, /* end of list */
 };
 
+static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
 
 int qemu_read_default_config_files(bool userconfig)
 {
@@ -273,6 +273,34 @@ static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
     return size;
 }
 
+/* This is the last block that we have visited serching for dirty pages
+ */
+static RAMBlock *last_seen_block;
+/* This is the last block from where we have sent data */
+static RAMBlock *last_sent_block;
+static ram_addr_t last_offset;
+static unsigned long *migration_bitmap;
+static uint64_t migration_dirty_pages;
+static uint32_t last_version;
+static bool ram_bulk_stage;
+
+/* Update the xbzrle cache to reflect a page that's been sent as all 0.
+ * The important thing is that a stale (not-yet-0'd) page be replaced
+ * by the new data.
+ * As a bonus, if the page wasn't in the cache it gets added so that
+ * when a small write is made into the 0'd page it gets XBZRLE sent
+ */
+static void xbzrle_cache_zero_page(ram_addr_t current_addr)
+{
+    if (ram_bulk_stage || !migrate_use_xbzrle()) {
+        return;
+    }
+
+    /* We don't care if this fails to allocate a new cache page
+     * as long as it updated an old one */
+    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
+}
+
 #define ENCODING_FLAG_XBZRLE 0x1
 
 static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
@@ -329,18 +357,6 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
     return bytes_sent;
 }
 
-
-/* This is the last block that we have visited serching for dirty pages
- */
-static RAMBlock *last_seen_block;
-/* This is the last block from where we have sent data */
-static RAMBlock *last_sent_block;
-static ram_addr_t last_offset;
-static unsigned long *migration_bitmap;
-static uint64_t migration_dirty_pages;
-static uint32_t last_version;
-static bool ram_bulk_stage;
-
 static inline
 ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                  ram_addr_t start)
@@ -512,6 +528,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
         } else {
             int ret;
             uint8_t *p;
+            bool send_async = true;
             int cont = (block == last_sent_block) ?
                 RAM_SAVE_FLAG_CONTINUE : 0;
 
@@ -522,6 +539,7 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
             ret = ram_control_save_page(f, block->offset, offset,
                                         TARGET_PAGE_SIZE, &bytes_sent);
+            current_addr = block->offset + offset;
 
             if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                 if (ret != RAM_SAVE_CONTROL_DELAYED) {
                     if (bytes_sent > 0) {
@@ -536,19 +554,35 @@ static int ram_save_block(QEMUFile *f, bool last_stage)
                                             RAM_SAVE_FLAG_COMPRESS);
                 qemu_put_byte(f, 0);
                 bytes_sent++;
+                /* Must let xbzrle know, otherwise a previous (now 0'd) cached
+                 * page would be stale
+                 */
+                xbzrle_cache_zero_page(current_addr);
             } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
-                current_addr = block->offset + offset;
                 bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                               offset, cont, last_stage);
                 if (!last_stage) {
+                    /* We must send exactly what's in the xbzrle cache
+                     * even if the page wasn't xbzrle compressed, so that
+                     * it's right next time.
+                     */
                     p = get_cached_data(XBZRLE.cache, current_addr);
+
+                    /* Can't send this cached data async, since the cache page
+                     * might get updated before it gets to the wire
+                     */
+                    send_async = false;
                 }
             }
             /* XBZRLE overflow or normal page */
             if (bytes_sent == -1) {
                 bytes_sent = save_block_hdr(f, block, offset, cont,
                                             RAM_SAVE_FLAG_PAGE);
-                qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
+                if (send_async) {
+                    qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
+                } else {
+                    qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+                }
                 bytes_sent += TARGET_PAGE_SIZE;
                 acct_info.norm_pages++;
             }
```
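
The second fix is about buffer lifetime rather than cache contents. Below is a hedged sketch of the hazard — `put_buffer_async()` and `stream_flush()` are stand-ins invented for illustration, not QEMUFile's actual internals — showing why queuing a pointer to the cache's own page is unsafe: the bytes are only read at flush time, so a later `cache_insert()` into the same slot changes what goes on the wire.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE 8                      /* toy page size */

/* Toy model of an "async" put: remember the caller's pointer now and
 * only read the bytes when the stream is flushed, as a zero-copy
 * queue would. */
static const uint8_t *pending;

static void put_buffer_async(const uint8_t *buf)
{
    pending = buf;                  /* no copy is taken here */
}

static void stream_flush(uint8_t *wire)
{
    memcpy(wire, pending, PAGE);    /* bytes are read at flush time */
}

int main(void)
{
    uint8_t cache_page[PAGE];       /* stands in for an XBZRLE cache slot */
    uint8_t wire[PAGE];

    memset(cache_page, 0x11, PAGE);
    put_buffer_async(cache_page);   /* queue the cached page for sending */

    /* Before the flush, a later iteration updates the same cache slot
     * (as cache_insert() may) -- mutating the queued buffer. */
    memset(cache_page, 0x22, PAGE);

    stream_flush(wire);
    printf("wire carries 0x%02x, sender meant 0x11\n", wire[0]);
    return 0;
}
```

Hence the patch's `send_async = false` path: `qemu_put_buffer()` copies the page out immediately, trading one memcpy for a stable wire image, while pages not served from the cache keep the zero-copy async path.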