author      Leonardo Bras <leobras@redhat.com>             2022-05-13 03:28:37 -0300
committer   Dr. David Alan Gilbert <dgilbert@redhat.com>   2022-05-16 13:56:24 +0100
commit      5b1d9bab2da4fca3a3caee97c430e5709cb32b7b (patch)
tree        265f27a81f28657d8108cf6da54ef830d7d16c53 /migration/multifd.c
parent      b7dbdd8e76cd03453c234dbb9578d20969859d74 (diff)
multifd: Implement zero copy write in multifd migration (multifd-zero-copy)
Implement zero copy send on nocomp_send_write() by making use of the QIOChannel
writev + flags & flush interface.
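
In outline, the send side queues buffers with the zero-copy flag and later
flushes the channel before the pages may be reused. A minimal sketch of that
pattern, using the identifiers from the diff below (not a complete function,
error paths trimmed):

    /* Chosen once per channel at setup time. */
    p->write_flags = migrate_use_zero_copy_send() ?
                     QIO_CHANNEL_WRITE_FLAG_ZERO_COPY : 0;

    /*
     * Queue the payload. With zero copy the kernel may still reference
     * these pages after the call returns, so they must not be reused yet.
     */
    ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num,
                                      NULL, 0, p->write_flags, &local_err);

    /*
     * Wait for everything queued so far to be transmitted; after this
     * the pages are safe to rewrite and send again.
     */
    ret = qio_channel_flush(p->c, &local_err);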
Change multifd_send_sync_main() so flush_zero_copy() can be called after each
iteration, to make sure all dirty pages are sent before a new iteration
starts. It also flushes at the beginning and at the end of migration.
Also make it return -1 if flush_zero_copy() fails, in order to cancel the
migration process and avoid resuming the guest on the target host without it
having received all of the current RAM.
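
Condensed from the multifd_send_sync_main() hunk below, the per-channel flush
and its error path look roughly like this:

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (flush_zero_copy && p->c) {
            Error *err = NULL;

            /* Block until the kernel has sent all queued zero-copy writes. */
            if (qio_channel_flush(p->c, &err) < 0) {
                error_report_err(err);
                return -1;  /* propagated so the migration gets cancelled */
            }
        }
    }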
This works fine for RAM migration because RAM pages are not usually freed, and
changing a page's content between writev_zero_copy() and the actual sending of
the buffer is not a problem: the change dirties the page, which causes it to
be re-sent in a later iteration anyway.
A lot of locked memory may be needed in order to use multifd migration with
zero-copy enabled, so low-privileged users trying to perform multifd
migrations may need to keep the feature disabled.
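
For illustration only, a sketch of the kind of pre-flight check a management
layer could perform; zero_copy_send_plausible() is a hypothetical helper, not
part of this patch or of QEMU:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/resource.h>

    /*
     * Hypothetical helper: zero-copy send pins guest pages while they are
     * in flight, so a small RLIMIT_MEMLOCK makes it fail for
     * low-privileged users.
     */
    static bool zero_copy_send_plausible(size_t bytes_in_flight)
    {
        struct rlimit lim;

        if (getrlimit(RLIMIT_MEMLOCK, &lim) < 0) {
            return false;
        }
        return lim.rlim_cur == RLIM_INFINITY ||
               lim.rlim_cur >= bytes_in_flight;
    }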
Signed-off-by: Leonardo Bras <leobras@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>
Message-Id: <20220513062836.965425-9-leobras@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Diffstat (limited to 'migration/multifd.c')
-rw-r--r--   migration/multifd.c   37
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/migration/multifd.c b/migration/multifd.c
index 2541cd2..9282ab6 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -569,6 +569,7 @@ void multifd_save_cleanup(void)
 int multifd_send_sync_main(QEMUFile *f)
 {
     int i;
+    bool flush_zero_copy;
 
     if (!migrate_use_multifd()) {
         return 0;
@@ -579,6 +580,20 @@ int multifd_send_sync_main(QEMUFile *f)
             return -1;
         }
     }
+
+    /*
+     * When using zero-copy, it's necessary to flush the pages before any of
+     * the pages can be sent again, so we'll make sure the new version of the
+     * pages will always arrive _later_ than the old pages.
+     *
+     * Currently we achieve this by flushing the zero-page requested writes
+     * per ram iteration, but in the future we could potentially optimize it
+     * to be less frequent, e.g. only after we finished one whole scanning of
+     * all the dirty bitmaps.
+     */
+
+    flush_zero_copy = migrate_use_zero_copy_send();
+
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
 
@@ -600,6 +615,17 @@ int multifd_send_sync_main(QEMUFile *f)
         ram_counters.transferred += p->packet_len;
         qemu_mutex_unlock(&p->mutex);
         qemu_sem_post(&p->sem);
+
+        if (flush_zero_copy && p->c) {
+            int ret;
+            Error *err = NULL;
+
+            ret = qio_channel_flush(p->c, &err);
+            if (ret < 0) {
+                error_report_err(err);
+                return -1;
+            }
+        }
     }
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
@@ -684,8 +710,8 @@ static void *multifd_send_thread(void *opaque)
                 p->iov[0].iov_base = p->packet;
             }
 
-            ret = qio_channel_writev_all(p->c, p->iov, p->iovs_num,
-                                         &local_err);
+            ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
+                                              0, p->write_flags, &local_err);
             if (ret != 0) {
                 break;
             }
@@ -913,6 +939,13 @@ int multifd_save_setup(Error **errp)
         /* We need one extra place for the packet header */
        p->iov = g_new0(struct iovec, page_count + 1);
         p->normal = g_new0(ram_addr_t, page_count);
+
+        if (migrate_use_zero_copy_send()) {
+            p->write_flags = QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
+        } else {
+            p->write_flags = 0;
+        }
+
         socket_send_channel_create(multifd_new_send_channel_async, p);
     }