author     Haozhong Zhang <haozhong.zhang@intel.com>    2017-06-28 16:37:04 +0800
committer  Juan Quintela <quintela@redhat.com>          2017-06-28 12:23:58 +0200
commit     084140bd4989095d39978269fd0f43e398a0015d (patch)
tree       4a18368c0195ba881a42018767d4385f4a30c76f /include
parent     c788ada8163127d20505e3ebf9b8735ba313d661 (diff)
exec: fix access to ram_list.dirty_memory when sync dirty bitmap
In cpu_physical_memory_sync_dirty_bitmap(rb, start, ...), the 2nd argument 'start' is relative to the start of the ramblock 'rb'. When it is used to access the dirty memory bitmap of ram_list (i.e. ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]->blocks[]), an offset to the start of all RAM (i.e. rb->offset) should be added to it; that offset has been missing since commit 6b6712efcc. For a ramblock of a host memory backend whose offset is not zero, cpu_physical_memory_sync_dirty_bitmap() therefore synchronizes the wrong part of ram_list's dirty memory bitmap into the per-ramblock dirty bitmap. As a result, a guest using a host memory backend may crash after migration.

Fix this by adding the ramblock's offset when accessing the dirty memory bitmap of ram_list in cpu_physical_memory_sync_dirty_bitmap().

Reported-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Message-Id: <20170628083704.24997-1-haozhong.zhang@intel.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Tested-by: Juan Quintela <quintela@redhat.com>
Tested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
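Below is a minimal, self-contained sketch of the bitmap index arithmetic this patch corrects. TARGET_PAGE_BITS, BITS_PER_LONG, DIRTY_MEMORY_BLOCK_SIZE and BIT_WORD are stand-ins for the definitions in QEMU's headers, and rb_offset/start are made-up example values; it only illustrates why omitting rb->offset indexes the wrong part of ram_list's dirty bitmap.

#include <stdio.h>
#include <stdint.h>

/* Stand-in values for illustration only; the real constants and the
 * BIT_WORD() macro come from QEMU's headers. */
#define TARGET_PAGE_BITS        12
#define BITS_PER_LONG           64
#define DIRTY_MEMORY_BLOCK_SIZE (1UL << 16)   /* bits (pages) per block, stand-in */
#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)

typedef uint64_t ram_addr_t;

int main(void)
{
    ram_addr_t rb_offset = 0x40000000;   /* hypothetical ramblock offset (1 GiB) */
    ram_addr_t start     = 0x00200000;   /* hypothetical offset, relative to the ramblock */

    /* Buggy indexing: 'start' alone misses the ramblock's offset into all RAM. */
    unsigned long word_old = BIT_WORD(start >> TARGET_PAGE_BITS);
    /* Fixed indexing, as in the patch's first hunk below. */
    unsigned long word_new = BIT_WORD((start + rb_offset) >> TARGET_PAGE_BITS);

    unsigned long idx    = (word_new * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
    unsigned long offset = BIT_WORD((word_new * BITS_PER_LONG) %
                                    DIRTY_MEMORY_BLOCK_SIZE);

    printf("buggy word %lu, fixed word %lu -> block idx %lu, word offset %lu\n",
           word_old, word_new, idx, offset);
    return 0;
}

With these example values the buggy computation lands in dirty-memory block 0 while the fixed one lands in block 4, which is the kind of mis-synchronization the commit message describes for ramblocks whose offset is not zero.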
Diffstat (limited to 'include')
-rw-r--r--  include/exec/ram_addr.h  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 73d1bea..c04f4f6 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -386,8 +386,9 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
unsigned long * const *src;
- unsigned long idx = (page * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
- unsigned long offset = BIT_WORD((page * BITS_PER_LONG) %
+ unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
+ unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
+ unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
DIRTY_MEMORY_BLOCK_SIZE);
rcu_read_lock();
@@ -414,9 +415,11 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
rcu_read_unlock();
} else {
+ ram_addr_t offset = rb->offset;
+
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
- start + addr,
+ start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
*real_dirty_pages += 1;