aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--migration/ram.c112
1 file changed, 61 insertions(+), 51 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index d81bf7b..3194997 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -85,6 +85,46 @@
XBZRLECacheStats xbzrle_counters;
+/* used by the search for pages to send */
+struct PageSearchStatus {
+ /* The migration channel used for a specific host page */
+ QEMUFile *pss_channel;
+ /* Current block being searched */
+ RAMBlock *block;
+ /* Current page to search from */
+ unsigned long page;
+ /* Set once we wrap around */
+ bool complete_round;
+ /*
+ * [POSTCOPY-ONLY] Whether current page is explicitly requested by
+ * postcopy. When set, the request is "urgent" because the dest QEMU
+ * threads are waiting for us.
+ */
+ bool postcopy_requested;
+ /*
+ * [POSTCOPY-ONLY] The target channel to use to send current page.
+ *
+ * Note: This may _not_ match the value of postcopy_requested
+ * above. Let's imagine the case where the postcopy request is exactly
+ * the page that we're sending in progress during precopy. In this case
+ * we'll have postcopy_requested set to true but the target channel
+ * will be the precopy channel (so that we don't split brain on that
+ * specific page, since the precopy channel already contains part of
+ * that page's data).
+ *
+ * Besides that specific use case, postcopy_target_channel should
+ * always be equal to postcopy_requested, because by default we send
+ * postcopy pages via postcopy preempt channel.
+ */
+ bool postcopy_target_channel;
+ /* Whether we're sending a host page */
+ bool host_page_sending;
+ /* The start/end of current host page. Invalid if host_page_sending==false */
+ unsigned long host_page_start;
+ unsigned long host_page_end;
+};
+typedef struct PageSearchStatus PageSearchStatus;
+
/* struct contains XBZRLE cache and a static page
used by the compression */
static struct {
@@ -319,6 +359,11 @@ typedef struct {
struct RAMState {
/* QEMUFile used for this migration */
QEMUFile *f;
+ /*
+ * Per-channel PageSearchStatus structures, used when sending pages.
+ * Protected by the bitmap_mutex.
+ */
+ PageSearchStatus pss[RAM_CHANNEL_MAX];
/* UFFD file descriptor, used in 'write-tracking' migration */
int uffdio_fd;
/* Last block that we have visited searching for dirty pages */
@@ -362,7 +407,12 @@ struct RAMState {
uint64_t target_page_count;
/* number of dirty bits in the bitmap */
uint64_t migration_dirty_pages;
- /* Protects modification of the bitmap and migration dirty pages */
+ /*
+ * Protects:
+ * - dirty/clear bitmap
+ * - migration_dirty_pages
+ * - pss structures
+ */
QemuMutex bitmap_mutex;
/* The RAMBlock used in the last src_page_requests */
RAMBlock *last_req_rb;
@@ -451,46 +501,6 @@ void dirty_sync_missed_zero_copy(void)
ram_counters.dirty_sync_missed_zero_copy++;
}
-/* used by the search for pages to send */
-struct PageSearchStatus {
- /* The migration channel used for a specific host page */
- QEMUFile *pss_channel;
- /* Current block being searched */
- RAMBlock *block;
- /* Current page to search from */
- unsigned long page;
- /* Set once we wrap around */
- bool complete_round;
- /*
- * [POSTCOPY-ONLY] Whether current page is explicitly requested by
- * postcopy. When set, the request is "urgent" because the dest QEMU
- * threads are waiting for us.
- */
- bool postcopy_requested;
- /*
- * [POSTCOPY-ONLY] The target channel to use to send current page.
- *
- * Note: This may _not_ match with the value in postcopy_requested
- * above. Let's imagine the case where the postcopy request is exactly
- * the page that we're sending in progress during precopy. In this case
- * we'll have postcopy_requested set to true but the target channel
- * will be the precopy channel (so that we don't split brain on that
- * specific page since the precopy channel already contains partial of
- * that page data).
- *
- * Besides that specific use case, postcopy_target_channel should
- * always be equal to postcopy_requested, because by default we send
- * postcopy pages via postcopy preempt channel.
- */
- bool postcopy_target_channel;
- /* Whether we're sending a host page */
- bool host_page_sending;
- /* The start/end of current host page. Only valid if host_page_sending==true */
- unsigned long host_page_start;
- unsigned long host_page_end;
-};
-typedef struct PageSearchStatus PageSearchStatus;
-
CompressionStats compression_counters;
struct CompressParam {
@@ -2637,7 +2647,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
*/
static int ram_find_and_save_block(RAMState *rs)
{
- PageSearchStatus pss;
+ PageSearchStatus *pss = &rs->pss[RAM_CHANNEL_PRECOPY];
int pages = 0;
bool again, found;
@@ -2658,11 +2668,11 @@ static int ram_find_and_save_block(RAMState *rs)
rs->last_page = 0;
}
- pss_init(&pss, rs->last_seen_block, rs->last_page);
+ pss_init(pss, rs->last_seen_block, rs->last_page);
do {
again = true;
- found = get_queued_page(rs, &pss);
+ found = get_queued_page(rs, pss);
if (!found) {
/*
@@ -2670,27 +2680,27 @@ static int ram_find_and_save_block(RAMState *rs)
* preempted precopy. Otherwise find the next dirty bit.
*/
if (postcopy_preempt_triggered(rs)) {
- postcopy_preempt_restore(rs, &pss, false);
+ postcopy_preempt_restore(rs, pss, false);
found = true;
} else {
/* priority queue empty, so just search for something dirty */
- found = find_dirty_block(rs, &pss, &again);
+ found = find_dirty_block(rs, pss, &again);
}
}
if (found) {
/* Update rs->f with correct channel */
if (postcopy_preempt_active()) {
- postcopy_preempt_choose_channel(rs, &pss);
+ postcopy_preempt_choose_channel(rs, pss);
}
/* Cache rs->f in pss_channel (TODO: remove rs->f) */
- pss.pss_channel = rs->f;
- pages = ram_save_host_page(rs, &pss);
+ pss->pss_channel = rs->f;
+ pages = ram_save_host_page(rs, pss);
}
} while (!pages && again);
- rs->last_seen_block = pss.block;
- rs->last_page = pss.page;
+ rs->last_seen_block = pss->block;
+ rs->last_page = pss->page;
return pages;
}