author    Paolo Bonzini <pbonzini@redhat.com>  2015-03-23 11:56:01 +0100
committer Paolo Bonzini <pbonzini@redhat.com>  2015-06-05 17:09:59 +0200
commit    58d2707e8713ef17b89b8b4c9ce586c76655a385 (patch)
tree      e0ce5162d9d34b8b97afe99666be13ee265c6620 /exec.c
parent    fc377bcf617a48233a99a9fe0a26247c38b5cb76 (diff)
exec: pass client mask to cpu_physical_memory_set_dirty_range
This cuts in half the cost of bitmap operations (which will become
more expensive when made atomic) during migration on non-VRAM regions.

Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
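For context, each dirty-memory client (VGA, CODE, MIGRATION) keeps its own
per-page bitmap, and the new mask argument selects which of those bitmaps a
write actually touches. Below is a minimal standalone sketch of the idea: the
DIRTY_MEMORY_* and DIRTY_CLIENTS_* names mirror this series, while the
fixed-size bitmaps and the bitmap_set() helper are simplified stand-ins for
illustration only.

#include <stdint.h>

enum {
    DIRTY_MEMORY_VGA,
    DIRTY_MEMORY_CODE,
    DIRTY_MEMORY_MIGRATION,
    DIRTY_MEMORY_NUM,
};

#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))

#define PAGE_BITS      12                /* 4 KiB pages */
#define NPAGES         1024              /* toy guest: 4 MiB of RAM */
#define BITS_PER_LONG  (8 * sizeof(unsigned long))

static unsigned long dirty_bitmap[DIRTY_MEMORY_NUM][NPAGES / BITS_PER_LONG];

/* Simplified stand-in for QEMU's bitmap_set(): mark pages [start, start+n). */
static void bitmap_set(unsigned long *map, unsigned long start, unsigned long n)
{
    for (unsigned long i = start; i < start + n; i++) {
        map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
    }
}

/* Mark [start, start+length) dirty, but only in the bitmaps named by
 * the mask.  A migration-only region passes a mask with just the
 * MIGRATION bit set and skips the VGA and CODE bitmaps entirely,
 * which is where the bitmap-operation savings come from.  A zero
 * mask is a legal no-op, so callers no longer need an if () guard. */
static void set_dirty_range(uint64_t start, uint64_t length, uint8_t mask)
{
    unsigned long page = start >> PAGE_BITS;
    unsigned long end = (start + length + (1ULL << PAGE_BITS) - 1) >> PAGE_BITS;

    for (int client = 0; client < DIRTY_MEMORY_NUM; client++) {
        if (mask & (1 << client)) {
            bitmap_set(dirty_bitmap[client], page, end - page);
        }
    }
}

With this shape, DIRTY_CLIENTS_ALL reproduces the old unconditional behaviour
(used below when RAM blocks are created or resized), while
DIRTY_CLIENTS_NOCODE matches the notdirty write path, where the CODE bitmap
is handled separately by the code-invalidation machinery.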
Diffstat (limited to 'exec.c')
-rw-r--r-- exec.c | 20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/exec.c b/exec.c
index 650cfa8..fe137bd 100644
--- a/exec.c
+++ b/exec.c
@@ -1351,7 +1351,8 @@ int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
block->used_length = newsize;
- cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
+ cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
+ DIRTY_CLIENTS_ALL);
memory_region_set_size(block->mr, newsize);
if (block->resized) {
block->resized(block->idstr, newsize, block->host);
@@ -1425,7 +1426,8 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
}
}
cpu_physical_memory_set_dirty_range(new_block->offset,
- new_block->used_length);
+ new_block->used_length,
+ DIRTY_CLIENTS_ALL);
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
@@ -1813,7 +1815,11 @@ static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
default:
abort();
}
- cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
+ /* Set both VGA and migration bits for simplicity and to remove
+ * the notdirty callback faster.
+ */
+ cpu_physical_memory_set_dirty_range(ram_addr, size,
+ DIRTY_CLIENTS_NOCODE);
/* we remove the notdirty callback only if the code has been
flushed */
if (!cpu_physical_memory_is_clean(ram_addr)) {
@@ -2259,9 +2265,7 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
tb_invalidate_phys_range(addr, addr + length);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
- if (dirty_log_mask) {
- cpu_physical_memory_set_dirty_range_nocode(addr, length);
- }
+ cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
} else {
xen_modified_memory(addr, length);
}
@@ -3014,9 +3018,7 @@ void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
- if (dirty_log_mask) {
- cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
- }
+ cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
r = MEMTX_OK;
}
if (result) {
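The two _nocode call sites replaced above follow the same pattern: query the
region's dirty log mask, drop the CODE bit once translated code for the range
has been invalidated, and hand whatever remains to the masked helper. A
hedged sketch of that calling shape, reusing the toy set_dirty_range() from
the sketch above; tb_invalidate_range() here is a hypothetical stand-in for
QEMU's tb_invalidate_phys_range().

/* Hypothetical stand-in for QEMU's tb_invalidate_phys_range(). */
static void tb_invalidate_range(uint64_t start, uint64_t end)
{
    /* ... flush translated code overlapping [start, end) ... */
    (void)start;
    (void)end;
}

/* Mirrors invalidate_and_set_dirty() above: after code invalidation
 * the CODE client needs no dirty bit, and a mask that ends up as 0
 * makes the set_dirty_range() call a no-op. */
static void invalidate_and_set_dirty_sketch(uint64_t addr, uint64_t length,
                                            uint8_t dirty_log_mask)
{
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    set_dirty_range(addr, length, dirty_log_mask);
}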