Diffstat (limited to 'migration/ram.c')
-rw-r--r--  migration/ram.c  112
1 file changed, 112 insertions(+), 0 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 1233ff5..16ade7c 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -546,6 +546,8 @@ static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;
+static int ram_save_host_page_urgent(PageSearchStatus *pss);
+
static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
ram_addr_t offset, uint8_t *source_buf);
@@ -560,6 +562,16 @@ static void pss_init(PageSearchStatus *pss, RAMBlock *rb, ram_addr_t page)
pss->complete_round = false;
}
+/*
+ * Check whether two PSSs are actively sending the same page.  Return true
+ * if so, false otherwise.
+ */
+static bool pss_overlap(PageSearchStatus *pss1, PageSearchStatus *pss2)
+{
+ return pss1->host_page_sending && pss2->host_page_sending &&
+ (pss1->host_page_start == pss2->host_page_start);
+}
+
static void *do_data_compress(void *opaque)
{
CompressParam *param = opaque;
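For context, a minimal sketch of the PageSearchStatus fields that pss_overlap() above relies on. The field names are taken from this patch; the real struct in migration/ram.c has more members, so treat this reduced layout as an assumption, not the upstream definition:

/* Sketch only: a reduced view of the PageSearchStatus fields used here. */
struct PageSearchStatusSketch {
    QEMUFile *pss_channel;            /* channel this PSS sends through */
    RAMBlock *block;                  /* RAMBlock currently being scanned */
    unsigned long page;               /* current target page number */
    bool host_page_sending;           /* true while inside a host-page send */
    unsigned long host_page_start;    /* first target page of that host page */
    unsigned long host_page_end;      /* one past its last target page */
};

Two PSSs are considered overlapping only when both are in the middle of a host-page send and their host pages start at the same target page number.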
@@ -2260,6 +2272,57 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
return -1;
}
+ /*
+ * With postcopy preempt enabled, send the requested page directly from
+ * the rp-return thread.
+ */
+ if (postcopy_preempt_active()) {
+ ram_addr_t page_start = start >> TARGET_PAGE_BITS;
+ size_t page_size = qemu_ram_pagesize(ramblock);
+ PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_POSTCOPY];
+ int ret = 0;
+
+ qemu_mutex_lock(&rs->bitmap_mutex);
+
+ pss_init(pss, ramblock, page_start);
+ /*
+ * Always use the preempt channel, and make sure it's there.  It's
+ * safe to access without a lock, because while the rp-thread is
+ * running we are the only ones operating on the qemufile.
+ */
+ pss->pss_channel = migrate_get_current()->postcopy_qemufile_src;
+ pss->postcopy_requested = true;
+ assert(pss->pss_channel);
+
+ /*
+ * The length must be a multiple of the host page size.  Just
+ * assert; if something is wrong we're mostly split-brain anyway.
+ */
+ assert(len % page_size == 0);
+ while (len) {
+ if (ram_save_host_page_urgent(pss)) {
+ error_report("%s: ram_save_host_page_urgent() failed: "
+ "ramblock=%s, start_addr=0x"RAM_ADDR_FMT,
+ __func__, ramblock->idstr, start);
+ ret = -1;
+ break;
+ }
+ /*
+ * NOTE: after ram_save_host_page_urgent() succeeded, pss->page
+ * will automatically be moved and point to the next host page
+ * we're going to send, so no need to update here.
+ *
+ * Normally QEMU never sends more than one host page per request,
+ * so the loop should only run once; keep it anyway for
+ * consistency.
+ */
+ len -= page_size;
+ }
+ qemu_mutex_unlock(&rs->bitmap_mutex);
+
+ return ret;
+ }
+
struct RAMSrcPageRequest *new_entry =
g_new0(struct RAMSrcPageRequest, 1);
new_entry->rb = ramblock;
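A quick worked example of the request arithmetic in the preempt branch above, assuming 4KiB target pages (TARGET_PAGE_BITS == 12) and a 2MiB hugepage-backed ramblock; all concrete values here are hypothetical:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const unsigned target_page_bits = 12;   /* assumed TARGET_PAGE_BITS */
    uint64_t start = 0x400000;              /* request offset in the block */
    uint64_t len = 0x400000;                /* request length: two host pages */
    uint64_t page_size = 0x200000;          /* assumed 2MiB host page size */

    uint64_t page_start = start >> target_page_bits;
    assert(page_start == 0x400);            /* target page number, as in pss_init() */
    assert(len % page_size == 0);           /* the invariant the patch asserts */

    unsigned host_pages = 0;
    while (len) {
        host_pages++;                       /* one ram_save_host_page_urgent() call */
        len -= page_size;                   /* the PSS already points at the next host page */
    }
    assert(host_pages == 2);                /* the loop runs once per host page */
    return 0;
}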
@@ -2537,6 +2600,55 @@ static void pss_host_page_finish(PageSearchStatus *pss)
pss->host_page_start = pss->host_page_end = 0;
}
+/*
+ * Send an urgent host page specified by `pss'.  Must be called with
+ * bitmap_mutex held.
+ *
+ * Returns 0 if saving the host page succeeded, non-zero otherwise.
+ */
+static int ram_save_host_page_urgent(PageSearchStatus *pss)
+{
+ bool page_dirty, sent = false;
+ RAMState *rs = ram_state;
+ int ret = 0;
+
+ trace_postcopy_preempt_send_host_page(pss->block->idstr, pss->page);
+ pss_host_page_prepare(pss);
+
+ /*
+ * If precopy is sending the same page, let precopy handle it;
+ * otherwise we could send the same page over two channels and the
+ * destination would not receive the whole page on either of them.
+ */
+ if (pss_overlap(pss, &ram_state->pss[RAM_CHANNEL_PRECOPY])) {
+ trace_postcopy_preempt_hit(pss->block->idstr,
+ pss->page << TARGET_PAGE_BITS);
+ return 0;
+ }
+
+ do {
+ page_dirty = migration_bitmap_clear_dirty(rs, pss->block, pss->page);
+
+ if (page_dirty) {
+ /* Be strict about the return code; exactly one page must be sent */
+ if (ram_save_target_page(rs, pss) != 1) {
+ error_report_once("%s: ram_save_target_page failed", __func__);
+ ret = -1;
+ goto out;
+ }
+ sent = true;
+ }
+ pss_find_next_dirty(pss);
+ } while (pss_within_range(pss));
+out:
+ pss_host_page_finish(pss);
+ /* For urgent requests, flush immediately if sent */
+ if (sent) {
+ qemu_fflush(pss->pss_channel);
+ }
+ return ret;
+}
+
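The do/while loop above is the usual host-page scan: test-and-clear each target page's dirty bit, send the dirty ones, and stop at the host page boundary. Below is a simplified standalone sketch of that pattern, where clear_dirty() and send_page() are hypothetical stand-ins for migration_bitmap_clear_dirty() and ram_save_target_page(), and the linear walk stands in for pss_find_next_dirty()/pss_within_range():

#include <stdbool.h>

/* Sketch: send every dirty target page within one host page. */
static int scan_one_host_page(unsigned long start, unsigned long end,
                              bool (*clear_dirty)(unsigned long),
                              int (*send_page)(unsigned long))
{
    for (unsigned long page = start; page < end; page++) {
        if (clear_dirty(page)) {        /* test-and-clear the dirty bit */
            if (send_page(page) != 1) { /* exactly one page must be sent */
                return -1;              /* mirrors the goto out error path */
            }
        }
    }
    return 0;
}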
/**
* ram_save_host_page: save a whole host page
*