author     Peter Xu <peterx@redhat.com>                     2020-10-21 17:27:19 -0400
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>     2020-10-26 16:15:04 +0000
commit     0c26781c0937324d175b8105bc96ccce778d9760
tree       4cdf53551febac2a1b5808bed18955c72f38c808 /migration/savevm.c
parent     8f8bfffcf1b486cee9a3bc79bb9b174682b06e22
migration: Sync requested pages after postcopy recovery
We synchronize the requested pages right after a postcopy recovery happens.
This re-synchronizes the list of prioritized pages with the source, so that the
faulted destination threads can be served faster.
Reported-by: Xiaohui Li <xiaohli@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20201021212721.440373-5-peterx@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
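Background for readers outside the QEMU tree: mis->page_requested is a GLib GTree keyed by the page-aligned faulted host address, populated on the destination's fault path (see the parent commit) before each page request goes out on the return path. What follows is a minimal standalone sketch of that bookkeeping pattern in plain GLib; the addresses, the pointer-ordering comparator, and the names record_page_request()/addr_cmp() are illustrative stand-ins, not QEMU code.

#include <glib.h>
#include <stdint.h>
#include <stdio.h>

static GMutex page_request_mutex;
static GTree *page_requested;

/* Order keys by raw pointer value, as befits an address-keyed tree. */
static gint addr_cmp(gconstpointer a, gconstpointer b)
{
    return (a < b) ? -1 : (a > b) ? 1 : 0;
}

/*
 * Record a faulted host address before asking the source for that page,
 * so that a later recovery can re-send the request.
 */
static void record_page_request(void *host_addr)
{
    g_mutex_lock(&page_request_mutex);
    if (!g_tree_lookup(page_requested, host_addr)) {
        /* The value only needs to be non-NULL for g_tree_lookup(). */
        g_tree_insert(page_requested, host_addr, GINT_TO_POINTER(1));
    }
    g_mutex_unlock(&page_request_mutex);
}

int main(void)
{
    page_requested = g_tree_new(addr_cmp);
    record_page_request((void *)(uintptr_t)0x7f0000001000);
    record_page_request((void *)(uintptr_t)0x7f0000003000);
    record_page_request((void *)(uintptr_t)0x7f0000001000); /* deduplicated */
    printf("pending page requests: %d\n", g_tree_nnodes(page_requested));
    g_tree_destroy(page_requested);
    return 0;
}

Builds against GLib with: gcc demo.c $(pkg-config --cflags --libs glib-2.0)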
Diffstat (limited to 'migration/savevm.c')
-rw-r--r-- | migration/savevm.c | 57
1 file changed, 57 insertions, 0 deletions
diff --git a/migration/savevm.c b/migration/savevm.c
index 6ebd7e6..21ccba9 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2010,6 +2010,49 @@ static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
     return LOADVM_QUIT;
 }
 
+/* We must be with page_request_mutex held */
+static gboolean postcopy_sync_page_req(gpointer key, gpointer value,
+                                       gpointer data)
+{
+    MigrationIncomingState *mis = data;
+    void *host_addr = (void *) key;
+    ram_addr_t rb_offset;
+    RAMBlock *rb;
+    int ret;
+
+    rb = qemu_ram_block_from_host(host_addr, true, &rb_offset);
+    if (!rb) {
+        /*
+         * This should _never_ happen. However be nice for a migrating VM to
+         * not crash/assert. Post an error (note: intended to not use *_once
+         * because we do want to see all the illegal addresses; and this can
+         * never be triggered by the guest so we're safe) and move on next.
+         */
+        error_report("%s: illegal host addr %p", __func__, host_addr);
+        /* Try the next entry */
+        return FALSE;
+    }
+
+    ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset);
+    if (ret) {
+        /* Please refer to above comment. */
+        error_report("%s: send rp message failed for addr %p",
+                     __func__, host_addr);
+        return FALSE;
+    }
+
+    trace_postcopy_page_req_sync(host_addr);
+
+    return FALSE;
+}
+
+static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis)
+{
+    WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
+        g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis);
+    }
+}
+
 static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
 {
     if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
@@ -2032,6 +2075,20 @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
     /* Tell source that "we are ready" */
     migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE);
 
+    /*
+     * After a postcopy recovery, the source should have lost the postcopy
+     * queue, or potentially the requested pages could have been lost during
+     * the network down phase. Let's re-sync with the source VM by re-sending
+     * all the pending pages that we eagerly need, so these threads won't get
+     * blocked too long due to the recovery.
+     *
+     * Without this procedure, the faulted destination VM threads (waiting for
+     * page requests right before the postcopy is interrupted) can keep hanging
+     * until the pages are sent by the source during the background copying of
+     * pages, or another thread faulted on the same address accidentally.
+     */
+    migrate_send_rp_req_pages_pending(mis);
+
     return 0;
 }
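A note on the traversal above: postcopy_sync_page_req() returns FALSE on every path, even on error, because g_tree_foreach() stops walking as soon as the callback returns TRUE; always returning FALSE is what lets the sync skip an illegal entry and still re-request the remaining pages. Below is a standalone GLib sketch of that contract, with hypothetical addresses and the illustrative names resend_page_req()/addr_cmp() in place of the QEMU internals.

#include <glib.h>
#include <stdint.h>
#include <stdio.h>

static gint addr_cmp(gconstpointer a, gconstpointer b)
{
    return (a < b) ? -1 : (a > b) ? 1 : 0;
}

/* GTraverseFunc contract: return FALSE to keep walking, TRUE to stop early. */
static gboolean resend_page_req(gpointer key, gpointer value, gpointer data)
{
    printf("re-sending page request for host addr %p\n", key);
    return FALSE; /* continue with the next entry, as postcopy_sync_page_req() does */
}

int main(void)
{
    GTree *page_requested = g_tree_new(addr_cmp);

    g_tree_insert(page_requested, (void *)(uintptr_t)0x7f0000001000,
                  GINT_TO_POINTER(1));
    g_tree_insert(page_requested, (void *)(uintptr_t)0x7f0000003000,
                  GINT_TO_POINTER(1));

    /* Walk every pending request, like migrate_send_rp_req_pages_pending(). */
    g_tree_foreach(page_requested, resend_page_req, NULL);

    g_tree_destroy(page_requested);
    return 0;
}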