author		Gavin Shan <gshan@redhat.com>		2023-05-09 12:21:19 +1000
committer	Paolo Bonzini <pbonzini@redhat.com>	2023-05-18 08:53:50 +0200
commit		1e493be58708c3003d9e38b09eedf0134c0ca9fe (patch)
tree		70a7d9d6d6fbb67f0a1b525d3e12fefb2148c024 /migration
parent		ed8d95182bc994e31e730c59e1c8bfec4822b27d (diff)
migration: Add last stage indicator to global dirty log
The global dirty log synchronization is used when KVM and dirty ring are
enabled. There is a particularity for ARM64 where the backup bitmap is used
to track dirty pages in non-running-vcpu situations. It means the dirty ring
works with the combination of ring buffer and backup bitmap. The dirty bits
in the backup bitmap need to be collected in the last stage of live
migration.

In order to identify the last stage of live migration and pass it down, an
extra parameter is added to the relevant functions and callbacks. This last
stage indicator isn't used until the dirty ring is enabled in the subsequent
patches.

No functional change intended.

Signed-off-by: Gavin Shan <gshan@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Tested-by: Zhenyu Zhang <zhenyzha@redhat.com>
Message-Id: <20230509022122.20888-2-gshan@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
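The motivation is easiest to see outside of QEMU. Below is a minimal,
standalone C sketch (not QEMU code; dirty_ring, backup_bitmap and
sync_dirty_log are made-up names for illustration) of why a last-stage flag
matters when a per-vcpu ring buffer is paired with a backup bitmap: the ring
is drained on every sync during iterative migration, while the backup bitmap
only needs to be folded in once, after the vCPUs have stopped. The real
consumer of the flag lands in the subsequent patches of this series.

	/*
	 * Standalone illustration only; all names here are hypothetical and
	 * do not correspond to QEMU or KVM interfaces.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Pages reported by running vCPUs, harvested on every sync. */
	static uint64_t dirty_ring;

	/*
	 * Pages dirtied outside a running vCPU context (on ARM64, e.g. by
	 * device emulation); cheap to leave untouched until the guest stops.
	 */
	static uint64_t backup_bitmap;

	static void sync_dirty_log(uint64_t *dest, bool last_stage)
	{
	    /* The ring is drained on every iteration of live migration. */
	    *dest |= dirty_ring;
	    dirty_ring = 0;

	    /*
	     * The backup bitmap only needs to be collected once, in the
	     * last stage, when the vCPUs are no longer running.
	     */
	    if (last_stage) {
	        *dest |= backup_bitmap;
	        backup_bitmap = 0;
	    }
	}

	int main(void)
	{
	    uint64_t migration_bitmap = 0;

	    dirty_ring = 0x0f;    /* dirtied by running vCPUs */
	    backup_bitmap = 0xf0; /* dirtied with no vCPU context */

	    sync_dirty_log(&migration_bitmap, false); /* iterative pass */
	    printf("after iterative sync: %#lx\n",
	           (unsigned long)migration_bitmap);

	    sync_dirty_log(&migration_bitmap, true);  /* final pass */
	    printf("after last-stage sync: %#lx\n",
	           (unsigned long)migration_bitmap);
	    return 0;
	}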
Diffstat (limited to 'migration')
-rw-r--r--	migration/dirtyrate.c	4
-rw-r--r--	migration/ram.c	20
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 0220db8..84f1b0f 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -101,7 +101,7 @@ void global_dirty_log_change(unsigned int flag, bool start)
static void global_dirty_log_sync(unsigned int flag, bool one_shot)
{
qemu_mutex_lock_iothread();
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(false);
if (one_shot) {
memory_global_dirty_log_stop(flag);
}
@@ -581,7 +581,7 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
* skip it unconditionally and start dirty tracking
* from 2'round of log sync
*/
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(false);
/*
* reset page protect manually and unconditionally.
diff --git a/migration/ram.c b/migration/ram.c
index f69d8d4..5900cab 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1039,7 +1039,7 @@ static void migration_trigger_throttle(RAMState *rs)
}
}
-static void migration_bitmap_sync(RAMState *rs)
+static void migration_bitmap_sync(RAMState *rs, bool last_stage)
{
RAMBlock *block;
int64_t end_time;
@@ -1051,7 +1051,7 @@ static void migration_bitmap_sync(RAMState *rs)
}
trace_migration_bitmap_sync_start();
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(last_stage);
qemu_mutex_lock(&rs->bitmap_mutex);
WITH_RCU_READ_LOCK_GUARD() {
@@ -1086,7 +1086,7 @@ static void migration_bitmap_sync(RAMState *rs)
}
}
-static void migration_bitmap_sync_precopy(RAMState *rs)
+static void migration_bitmap_sync_precopy(RAMState *rs, bool last_stage)
{
Error *local_err = NULL;
@@ -1099,7 +1099,7 @@ static void migration_bitmap_sync_precopy(RAMState *rs)
local_err = NULL;
}
- migration_bitmap_sync(rs);
+ migration_bitmap_sync(rs, last_stage);
if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
error_report_err(local_err);
@@ -2699,7 +2699,7 @@ void ram_postcopy_send_discard_bitmap(MigrationState *ms)
RCU_READ_LOCK_GUARD();
/* This should be our last sync, the src is now paused */
- migration_bitmap_sync(rs);
+ migration_bitmap_sync(rs, false);
/* Easiest way to make sure we don't resume in the middle of a host-page */
rs->pss[RAM_CHANNEL_PRECOPY].last_sent_block = NULL;
@@ -2890,7 +2890,7 @@ static void ram_init_bitmaps(RAMState *rs)
/* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) {
memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
- migration_bitmap_sync_precopy(rs);
+ migration_bitmap_sync_precopy(rs, false);
}
}
qemu_mutex_unlock_ramlist();
@@ -3214,7 +3214,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
WITH_RCU_READ_LOCK_GUARD() {
if (!migration_in_postcopy()) {
- migration_bitmap_sync_precopy(rs);
+ migration_bitmap_sync_precopy(rs, true);
}
ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@@ -3288,7 +3288,7 @@ static void ram_state_pending_exact(void *opaque, uint64_t *must_precopy,
if (!migration_in_postcopy() && remaining_size < s->threshold_size) {
qemu_mutex_lock_iothread();
WITH_RCU_READ_LOCK_GUARD() {
- migration_bitmap_sync_precopy(rs);
+ migration_bitmap_sync_precopy(rs, false);
}
qemu_mutex_unlock_iothread();
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
@@ -3523,7 +3523,7 @@ void colo_incoming_start_dirty_log(void)
qemu_mutex_lock_iothread();
qemu_mutex_lock_ramlist();
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(false);
WITH_RCU_READ_LOCK_GUARD() {
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
ramblock_sync_dirty_bitmap(ram_state, block);
@@ -3813,7 +3813,7 @@ void colo_flush_ram_cache(void)
void *src_host;
unsigned long offset = 0;
- memory_global_dirty_log_sync();
+ memory_global_dirty_log_sync(false);
qemu_mutex_lock(&ram_state->bitmap_mutex);
WITH_RCU_READ_LOCK_GUARD() {
RAMBLOCK_FOREACH_NOT_IGNORED(block) {