author     Richard Henderson <richard.henderson@linaro.org>  2021-11-09 09:41:31 +0100
committer  Richard Henderson <richard.henderson@linaro.org>  2021-11-09 09:41:31 +0100
commit     85549204552b624fe254831537e7a0f6450228b8 (patch)
tree       b9f6324b68985c0fc8ef22919e26bd8958125cd3
parent     f10e7b9f6fc18be390b3bc189e04b5147eb8dbf8 (diff)
parent     91fe9a8dbd449a2f333aefb82ec8adb1f6424408 (diff)
Merge remote-tracking branch 'remotes/juanquintela/tags/migration-20211109-pull-request' into staging
Migration Pull request

Hi

This pull request includes:
- fix sample-pages doc by hyman
- cleanup colo pages by contiguous blocks by Rao
- reset auto-converge by checkpoint by Rao

Please, apply.

# gpg: Signature made Tue 09 Nov 2021 09:02:37 AM CET
# gpg:                using RSA key 1899FF8EDEBF58CCEE034B82F487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>" [full]
# gpg:                 aka "Juan Quintela <quintela@trasno.org>" [full]

* remotes/juanquintela/tags/migration-20211109-pull-request:
  Reset the auto-converge counter at every checkpoint.
  Reduce the PVM stop time during Checkpoint
  docs: fix 'sample-pages' option tag

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r--  migration/colo.c     |  4
-rw-r--r--  migration/ram.c      | 57
-rw-r--r--  migration/ram.h      |  1
-rw-r--r--  qapi/migration.json  |  2
4 files changed, 60 insertions(+), 4 deletions(-)
diff --git a/migration/colo.c b/migration/colo.c
index e3b1f13..2415325 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -459,6 +459,10 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
     if (ret < 0) {
         goto out;
     }
+
+    if (migrate_auto_converge()) {
+        mig_throttle_counter_reset();
+    }
     /*
      * Only save VM's live state, which not including device state.
      * TODO: We may need a timeout mechanism to prevent COLO process
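The added call invokes mig_throttle_counter_reset() before each COLO checkpoint saves live state; the helper itself is defined in the migration/ram.c hunk below and restarts the auto-converge measurement window, so throttling decisions after a checkpoint are not driven by counters accumulated before it. A minimal, self-contained sketch of that reset pattern (not QEMU code; the struct and function names below only mirror the RAMState fields the helper touches):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-ins for the RAMState fields used by auto-converge. */
struct throttle_state {
    int64_t time_last_bitmap_sync;    /* start of the current sample window (ms) */
    uint64_t num_dirty_pages_period;  /* dirty pages counted in this window      */
    uint64_t bytes_xfer_prev;         /* bytes transferred when the window began */
};

static int64_t clock_ms(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Begin a fresh measurement window, as mig_throttle_counter_reset() does. */
static void throttle_counter_reset(struct throttle_state *s, uint64_t transferred)
{
    s->time_last_bitmap_sync = clock_ms();
    s->num_dirty_pages_period = 0;
    s->bytes_xfer_prev = transferred;
}

int main(void)
{
    struct throttle_state s = { .num_dirty_pages_period = 123456 };

    /* Without a reset, stale counters from before the checkpoint would
     * suggest a very high dirty rate and keep pushing the throttle up. */
    throttle_counter_reset(&s, 0);
    printf("dirty pages this period: %" PRIu64 "\n", s.num_dirty_pages_period);
    return 0;
}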
diff --git a/migration/ram.c b/migration/ram.c
index 847af46..863035d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -641,6 +641,15 @@ static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
     }
 }
 
+void mig_throttle_counter_reset(void)
+{
+    RAMState *rs = ram_state;
+
+    rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    rs->num_dirty_pages_period = 0;
+    rs->bytes_xfer_prev = ram_counters.transferred;
+}
+
 /**
  * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
  *
@@ -836,6 +845,41 @@ migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb,
     }
 }
 
+/*
+ * colo_bitmap_find_dirty: find contiguous dirty pages from start
+ *
+ * Returns the page offset within memory region of the start of the contiguous
+ * dirty pages
+ *
+ * @rs: current RAM state
+ * @rb: RAMBlock where to search for dirty pages
+ * @start: page where we start the search
+ * @num: the number of contiguous dirty pages
+ */
+static inline
+unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
+                                     unsigned long start, unsigned long *num)
+{
+    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
+    unsigned long *bitmap = rb->bmap;
+    unsigned long first, next;
+
+    *num = 0;
+
+    if (ramblock_is_ignored(rb)) {
+        return size;
+    }
+
+    first = find_next_bit(bitmap, size, start);
+    if (first >= size) {
+        return first;
+    }
+    next = find_next_zero_bit(bitmap, size, first + 1);
+    assert(next >= first);
+    *num = next - first;
+    return first;
+}
+
 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                 RAMBlock *rb,
                                                 unsigned long page)
@@ -3886,19 +3930,26 @@ void colo_flush_ram_cache(void)
         block = QLIST_FIRST_RCU(&ram_list.blocks);
 
         while (block) {
-            offset = migration_bitmap_find_dirty(ram_state, block, offset);
+            unsigned long num = 0;
 
+            offset = colo_bitmap_find_dirty(ram_state, block, offset, &num);
             if (!offset_in_ramblock(block,
                                     ((ram_addr_t)offset) << TARGET_PAGE_BITS)) {
                 offset = 0;
+                num = 0;
                 block = QLIST_NEXT_RCU(block, next);
             } else {
-                migration_bitmap_clear_dirty(ram_state, block, offset);
+                unsigned long i = 0;
+
+                for (i = 0; i < num; i++) {
+                    migration_bitmap_clear_dirty(ram_state, block, offset + i);
+                }
                 dst_host = block->host
                          + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
                 src_host = block->colo_cache
                          + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
-                memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+                memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num);
+                offset += num;
             }
         }
     }
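Taken together, the ram.c changes switch the COLO flush from page-at-a-time to run-at-a-time copying: colo_bitmap_find_dirty() reports the first dirty page and the length of the contiguous dirty run, and colo_flush_ram_cache() then clears the run's dirty bits and issues a single memcpy() of TARGET_PAGE_SIZE * num bytes instead of one per page, which is the intent behind the "Reduce the PVM stop time during Checkpoint" patch. A minimal standalone sketch of the same run-based copy, using a byte-per-page dirty map rather than QEMU's find_next_bit()/find_next_zero_bit() bitmap helpers (all names here are illustrative, not QEMU APIs):

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096

/*
 * Find the first dirty page at or after 'start' and report how many
 * consecutive pages starting there are dirty.  Returns 'size' when no
 * dirty page remains.
 */
static size_t find_dirty_run(const unsigned char *dirty, size_t size,
                             size_t start, size_t *num)
{
    size_t first = start, next;

    *num = 0;
    while (first < size && !dirty[first]) {
        first++;
    }
    if (first >= size) {
        return size;
    }
    next = first + 1;
    while (next < size && dirty[next]) {
        next++;
    }
    *num = next - first;
    return first;
}

/* Copy each contiguous run of dirty pages with a single memcpy(). */
static void flush_dirty_pages(unsigned char *dst, const unsigned char *src,
                              unsigned char *dirty, size_t npages)
{
    size_t offset = 0, num;

    while ((offset = find_dirty_run(dirty, npages, offset, &num)) < npages) {
        memset(dirty + offset, 0, num);             /* clear the run's bits */
        memcpy(dst + offset * PAGE_SIZE,
               src + offset * PAGE_SIZE,
               num * PAGE_SIZE);                    /* one copy for the run */
        offset += num;
    }
}

When adjacent pages are dirtied together, as is common after a checkpoint, this turns many small per-page copies into a few large ones.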
diff --git a/migration/ram.h b/migration/ram.h
index dda1988..c515396 100644
--- a/migration/ram.h
+++ b/migration/ram.h
@@ -50,6 +50,7 @@ bool ramblock_is_ignored(RAMBlock *block);
 int xbzrle_cache_resize(uint64_t new_size, Error **errp);
 uint64_t ram_bytes_remaining(void);
 uint64_t ram_bytes_total(void);
+void mig_throttle_counter_reset(void);
 uint64_t ram_pagesize_summary(void);
 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
diff --git a/qapi/migration.json b/qapi/migration.json
index f0aefda..bbfd48c 100644
--- a/qapi/migration.json
+++ b/qapi/migration.json
@@ -1796,7 +1796,7 @@
 # @calc-time: time in units of second for sample dirty pages
 #
 # @sample-pages: page count per GB for sample dirty pages
-#                the default value is 512 (since 6.2)
+#                the default value is 512 (since 6.1)
 #
 # @mode: mode containing method of calculate dirtyrate includes
 #        'page-sampling' and 'dirty-ring' (Since 6.2)
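The migration.json hunk only corrects the version tag of @sample-pages, which was introduced in 6.1 rather than 6.2. For context, the parameter is an argument of the calc-dirty-rate QMP command; an illustrative invocation (values chosen arbitrarily) might look like:

{ "execute": "calc-dirty-rate",
  "arguments": { "calc-time": 1, "sample-pages": 512 } }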