Diffstat (limited to 'migration/ram.c')
-rw-r--r--  migration/ram.c  66
1 file changed, 49 insertions, 17 deletions
diff --git a/migration/ram.c b/migration/ram.c
index 02cfd76..31f4a9d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2994,7 +2994,6 @@ int colo_init_ram_cache(void)
}
return -errno;
}
- memcpy(block->colo_cache, block->host, block->used_length);
}
}
@@ -3008,19 +3007,36 @@ int colo_init_ram_cache(void)
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
-
block->bmap = bitmap_new(pages);
- bitmap_set(block->bmap, 0, pages);
}
}
- ram_state = g_new0(RAMState, 1);
- ram_state->migration_dirty_pages = 0;
- qemu_mutex_init(&ram_state->bitmap_mutex);
- memory_global_dirty_log_start();
+ ram_state_init(&ram_state);
return 0;
}
+/* TODO: duplicated with ram_init_bitmaps */
+void colo_incoming_start_dirty_log(void)
+{
+ RAMBlock *block = NULL;
+ /* For memory_global_dirty_log_start below. */
+ qemu_mutex_lock_iothread();
+ qemu_mutex_lock_ramlist();
+
+ memory_global_dirty_log_sync();
+ WITH_RCU_READ_LOCK_GUARD() {
+ RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+ ramblock_sync_dirty_bitmap(ram_state, block);
+ /* Discard this dirty bitmap record */
+ bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
+ }
+ memory_global_dirty_log_start();
+ }
+ ram_state->migration_dirty_pages = 0;
+ qemu_mutex_unlock_ramlist();
+ qemu_mutex_unlock_iothread();
+}
+
/* The caller needs to hold the global lock to call this helper */
void colo_release_ram_cache(void)
{
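
For context, here is a toy sketch (not part of the patch, and not QEMU code) of the call ordering the new colo_incoming_start_dirty_log() helper above appears to assume on the incoming side: the colo_cache is allocated first, the initial RAM stream is loaded while each page is also backed up into the cache (see the later hunks), and only then are the bitmaps reset and dirty logging started. The stub functions below are illustrative stand-ins named after the real QEMU calls; the ordering itself is an assumption based on this series, not something shown in this diff.

/* Toy model only: the stubs stand in for the real QEMU functions. */
#include <stdio.h>

static int colo_init_ram_cache_stub(void)
{
    puts("allocate colo_cache (no bulk memcpy any more)");
    return 0;
}

static void load_initial_ram_stream_stub(void)
{
    puts("load RAM pages, backing each one up into colo_cache");
}

static void colo_incoming_start_dirty_log_stub(void)
{
    puts("sync + clear dirty bitmaps, start dirty logging");
}

int main(void)
{
    if (colo_init_ram_cache_stub()) {
        return 1;
    }
    load_initial_ram_stream_stub();
    /* Dirty tracking begins only after the initial load has finished. */
    colo_incoming_start_dirty_log_stub();
    return 0;
}
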
@@ -3040,9 +3056,7 @@ void colo_release_ram_cache(void)
}
}
}
- qemu_mutex_destroy(&ram_state->bitmap_mutex);
- g_free(ram_state);
- ram_state = NULL;
+ ram_state_cleanup(&ram_state);
}
/**
@@ -3356,7 +3370,7 @@ static int ram_load_precopy(QEMUFile *f)
while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
ram_addr_t addr, total_ram_bytes;
- void *host = NULL;
+ void *host = NULL, *host_bak = NULL;
uint8_t ch;
/*
@@ -3387,20 +3401,35 @@ static int ram_load_precopy(QEMUFile *f)
RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
RAMBlock *block = ram_block_from_stream(f, flags);
+ host = host_from_ram_block_offset(block, addr);
/*
- * After going into COLO, we should load the Page into colo_cache.
+ * After going into the COLO stage, we should not load the page
+ * into the SVM's memory directly; we put it into colo_cache first.
+ * NOTE: We need to keep a copy of the SVM's RAM in colo_cache.
+ * Previously, we copied all of this memory during the COLO preparation
+ * stage, which required stopping the VM and was time-consuming.
+ * Here we optimize it with a trick: back up every page during the
+ * migration process while COLO is enabled. Although this slows the
+ * migration down somewhat, it clearly reduces the downtime of backing
+ * up all of the SVM's memory in the COLO preparation stage.
*/
- if (migration_incoming_in_colo_state()) {
- host = colo_cache_from_block_offset(block, addr);
- } else {
- host = host_from_ram_block_offset(block, addr);
+ if (migration_incoming_colo_enabled()) {
+ if (migration_incoming_in_colo_state()) {
+ /* In COLO stage, put all pages into cache temporarily */
+ host = colo_cache_from_block_offset(block, addr);
+ } else {
+ /*
+ * In the migration stage but before the COLO stage,
+ * put all pages into both the cache and the SVM's memory.
+ */
+ host_bak = colo_cache_from_block_offset(block, addr);
+ }
}
if (!host) {
error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
ret = -EINVAL;
break;
}
-
if (!migration_incoming_in_colo_state()) {
ramblock_recv_bitmap_set(block, host);
}
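
A minimal, self-contained sketch (not QEMU code) of the destination choice introduced in the hunk above: pages go straight into the SVM's RAM by default, into the colo_cache only once the COLO stage has started, and into RAM now plus the cache afterwards during the migration stage of a COLO-enabled run. The names svm_ram, colo_cache_page and pick_destination are illustrative stand-ins; the two flags mirror migration_incoming_colo_enabled() and migration_incoming_in_colo_state().

/* Toy model only: simplified stand-ins for the QEMU data structures. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static unsigned char svm_ram[PAGE_SIZE];         /* stand-in for SVM memory */
static unsigned char colo_cache_page[PAGE_SIZE]; /* stand-in for colo_cache */

struct page_dst {
    unsigned char *host;      /* where the incoming page is written */
    unsigned char *host_bak;  /* optional second copy, made after loading */
};

static struct page_dst pick_destination(bool colo_enabled, bool in_colo)
{
    struct page_dst dst = { .host = svm_ram, .host_bak = NULL };

    if (colo_enabled) {
        if (in_colo) {
            /* COLO stage: pages go into the cache only. */
            dst.host = colo_cache_page;
        } else {
            /* Migration stage before COLO: the page lands in RAM now and
             * is backed up into the cache once it has been loaded. */
            dst.host_bak = colo_cache_page;
        }
    }
    return dst;
}

int main(void)
{
    struct page_dst d = pick_destination(true, false);

    printf("host=%s host_bak=%s\n",
           d.host == svm_ram ? "svm_ram" : "colo_cache",
           d.host_bak ? "colo_cache" : "none");
    return 0;
}
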
@@ -3514,6 +3543,9 @@ static int ram_load_precopy(QEMUFile *f)
if (!ret) {
ret = qemu_file_get_error(f);
}
+ if (!ret && host_bak) {
+ memcpy(host_bak, host, TARGET_PAGE_SIZE);
+ }
}
ret |= wait_for_decompress_done();
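
The back-up copy itself is deferred to the end of the per-page iteration, so it runs only after the page has been fully loaded (including any decompression or XBZRLE decoding) and only when no error was reported. Below is a small standalone sketch of that tail step; load_one_page() is a hypothetical stub and TOY_PAGE_SIZE stands in for TARGET_PAGE_SIZE.

/* Toy model only: mirrors the "if (!ret && host_bak)" tail of the loop. */
#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096

static int load_one_page(unsigned char *host)
{
    memset(host, 0x5a, TOY_PAGE_SIZE);  /* pretend the stream filled it */
    return 0;                           /* 0 == success, like ret above */
}

int main(void)
{
    unsigned char ram_page[TOY_PAGE_SIZE];
    unsigned char cache_page[TOY_PAGE_SIZE];
    unsigned char *host = ram_page;
    unsigned char *host_bak = cache_page; /* set only in the pre-COLO case */

    int ret = load_one_page(host);

    if (!ret && host_bak) {
        /* Mirror of: memcpy(host_bak, host, TARGET_PAGE_SIZE); */
        memcpy(host_bak, host, TOY_PAGE_SIZE);
    }
    printf("ret=%d cache_page[0]=%#x\n", ret, cache_page[0]);
    return 0;
}
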