-rw-r--r--   dump/dump.c                10
-rw-r--r--   include/migration/colo.h    1
-rw-r--r--   migration/colo.c           33
-rw-r--r--   migration/migration.c      26
-rw-r--r--   migration/multifd-zlib.c   48
-rw-r--r--   migration/multifd-zstd.c   47
-rw-r--r--   migration/multifd.c        70
-rw-r--r--   migration/multifd.h         6
-rw-r--r--   migration/ram.c            11
-rw-r--r--   migration/savevm.c          5

10 files changed, 131 insertions, 126 deletions
diff --git a/dump/dump.c b/dump/dump.c
index 662d0a6..a84d8b1 100644
--- a/dump/dump.c
+++ b/dump/dump.c
@@ -1293,14 +1293,6 @@ static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
     return 0;
 }
 
-/*
- * check if the page is all 0
- */
-static inline bool is_zero_page(const uint8_t *buf, size_t page_size)
-{
-    return buffer_is_zero(buf, page_size);
-}
-
 static void write_dump_pages(DumpState *s, Error **errp)
 {
     int ret = 0;
@@ -1357,7 +1349,7 @@ static void write_dump_pages(DumpState *s, Error **errp)
      */
     while (get_next_page(&block_iter, &pfn_iter, &buf, s)) {
         /* check zero page */
-        if (is_zero_page(buf, s->dump_info.page_size)) {
+        if (buffer_is_zero(buf, s->dump_info.page_size)) {
             ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor),
                               false);
             if (ret < 0) {
diff --git a/include/migration/colo.h b/include/migration/colo.h
index 768e1f0..5fbe1a6 100644
--- a/include/migration/colo.h
+++ b/include/migration/colo.h
@@ -37,4 +37,5 @@ COLOMode get_colo_mode(void);
 void colo_do_failover(void);
 
 void colo_checkpoint_notify(void *opaque);
+void colo_shutdown(void);
 #endif
diff --git a/migration/colo.c b/migration/colo.c
index 2415325..5f7071b 100644
--- a/migration/colo.c
+++ b/migration/colo.c
@@ -530,7 +530,6 @@ static void colo_process_checkpoint(MigrationState *s)
 {
     QIOChannelBuffer *bioc;
     QEMUFile *fb = NULL;
-    int64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_HOST);
     Error *local_err = NULL;
     int ret;
 
@@ -578,8 +577,8 @@ static void colo_process_checkpoint(MigrationState *s)
     qemu_mutex_unlock_iothread();
     trace_colo_vm_state_change("stop", "run");
 
-    timer_mod(s->colo_delay_timer,
-              current_time + s->parameters.x_checkpoint_delay);
+    timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) +
+              s->parameters.x_checkpoint_delay);
 
     while (s->state == MIGRATION_STATUS_COLO) {
         if (failover_get_state() != FAILOVER_STATUS_NONE) {
@@ -667,8 +666,6 @@ void migrate_start_colo_process(MigrationState *s)
                                 colo_checkpoint_notify, s);
 
     qemu_sem_init(&s->colo_exit_sem, 0);
-    migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
-                      MIGRATION_STATUS_COLO);
     colo_process_checkpoint(s);
     qemu_mutex_lock_iothread();
 }
@@ -683,8 +680,8 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
 
     qemu_mutex_lock_iothread();
     vm_stop_force_state(RUN_STATE_COLO);
-    trace_colo_vm_state_change("run", "stop");
     qemu_mutex_unlock_iothread();
+    trace_colo_vm_state_change("run", "stop");
 
     /* FIXME: This is unnecessary for periodic checkpoint mode */
     colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_REPLY,
@@ -786,8 +783,8 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis,
 
     vmstate_loading = false;
     vm_start();
-    trace_colo_vm_state_change("stop", "run");
     qemu_mutex_unlock_iothread();
+    trace_colo_vm_state_change("stop", "run");
 
     if (failover_get_state() == FAILOVER_STATUS_RELAUNCH) {
         return;
@@ -820,6 +817,26 @@ static void colo_wait_handle_message(MigrationIncomingState *mis,
     }
 }
 
+void colo_shutdown(void)
+{
+    MigrationIncomingState *mis = NULL;
+    MigrationState *s = NULL;
+
+    switch (get_colo_mode()) {
+    case COLO_MODE_PRIMARY:
+        s = migrate_get_current();
+        qemu_event_set(&s->colo_checkpoint_event);
+        qemu_sem_post(&s->colo_exit_sem);
+        break;
+    case COLO_MODE_SECONDARY:
+        mis = migration_incoming_get_current();
+        qemu_sem_post(&mis->colo_incoming_sem);
+        break;
+    default:
+        break;
+    }
+}
+
 void *colo_process_incoming_thread(void *opaque)
 {
     MigrationIncomingState *mis = opaque;
@@ -870,8 +887,8 @@
     abort();
 #endif
     vm_start();
-    trace_colo_vm_state_change("stop", "run");
     qemu_mutex_unlock_iothread();
+    trace_colo_vm_state_change("stop", "run");
 
     colo_send_message(mis->to_src_file, COLO_MESSAGE_CHECKPOINT_READY,
                       &local_err);
diff --git a/migration/migration.c b/migration/migration.c
index abaf6f9..3de11ae 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -226,6 +226,12 @@ void migration_cancel(const Error *error)
 void migration_shutdown(void)
 {
     /*
+     * When the QEMU main thread exit, the COLO thread
+     * may wait a semaphore. So, we should wakeup the
+     * COLO thread before migration shutdown.
+     */
+    colo_shutdown();
+    /*
      * Cancel the current migration - that will (eventually)
      * stop the migration using this structure
      */
@@ -990,6 +996,8 @@ static void populate_time_info(MigrationInfo *info, MigrationState *s)
 
 static void populate_ram_info(MigrationInfo *info, MigrationState *s)
 {
+    size_t page_size = qemu_target_page_size();
+
     info->has_ram = true;
     info->ram = g_malloc0(sizeof(*info->ram));
     info->ram->transferred = ram_counters.transferred;
@@ -998,12 +1006,11 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
     /* legacy value. It is not used anymore */
     info->ram->skipped = 0;
     info->ram->normal = ram_counters.normal;
-    info->ram->normal_bytes = ram_counters.normal *
-        qemu_target_page_size();
+    info->ram->normal_bytes = ram_counters.normal * page_size;
     info->ram->mbps = s->mbps;
     info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
     info->ram->postcopy_requests = ram_counters.postcopy_requests;
-    info->ram->page_size = qemu_target_page_size();
+    info->ram->page_size = page_size;
     info->ram->multifd_bytes = ram_counters.multifd_bytes;
     info->ram->pages_per_second = s->pages_per_second;
 
@@ -3607,12 +3614,7 @@ static void migration_iteration_finish(MigrationState *s)
         migration_calculate_complete(s);
         runstate_set(RUN_STATE_POSTMIGRATE);
         break;
-
-    case MIGRATION_STATUS_ACTIVE:
-        /*
-         * We should really assert here, but since it's during
-         * migration, let's try to reduce the usage of assertions.
-         */
+    case MIGRATION_STATUS_COLO:
         if (!migrate_colo_enabled()) {
             error_report("%s: critical error: calling COLO code without "
                          "COLO enabled", __func__);
@@ -3622,6 +3624,12 @@ static void migration_iteration_finish(MigrationState *s)
          * Fixme: we will run VM in COLO no matter its old running state.
          * After exited COLO, we will keep running.
          */
+        /* Fallthrough */
+    case MIGRATION_STATUS_ACTIVE:
+        /*
+         * We should really assert here, but since it's during
+         * migration, let's try to reduce the usage of assertions.
+         */
         s->vm_was_running = true;
         /* Fallthrough */
     case MIGRATION_STATUS_FAILED:
diff --git a/migration/multifd-zlib.c b/migration/multifd-zlib.c
index ab4ba75..da62017 100644
--- a/migration/multifd-zlib.c
+++ b/migration/multifd-zlib.c
@@ -13,6 +13,7 @@
 #include "qemu/osdep.h"
 #include <zlib.h>
 #include "qemu/rcu.h"
+#include "exec/ramblock.h"
 #include "exec/target_page.h"
 #include "qapi/error.h"
 #include "migration.h"
@@ -42,7 +43,6 @@ struct zlib_data {
  */
 static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
 {
-    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     struct zlib_data *z = g_malloc0(sizeof(struct zlib_data));
     z_stream *zs = &z->zs;
 
@@ -54,9 +54,8 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
         error_setg(errp, "multifd %d: deflate init failed", p->id);
         return -1;
     }
-    /* We will never have more than page_count pages */
-    z->zbuff_len = page_count * qemu_target_page_size();
-    z->zbuff_len *= 2;
+    /* To be safe, we reserve twice the size of the packet */
+    z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
     z->zbuff = g_try_malloc(z->zbuff_len);
     if (!z->zbuff) {
         deflateEnd(&z->zs);
@@ -74,6 +73,7 @@ static int zlib_send_setup(MultiFDSendParams *p, Error **errp)
  * Close the channel and return memory.
  *
  * @p: Params for the channel that we are using
+ * @errp: pointer to an error
  */
 static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
 {
@@ -95,27 +95,27 @@ static void zlib_send_cleanup(MultiFDSendParams *p, Error **errp)
  * Returns 0 for success or -1 for error
  *
  * @p: Params for the channel that we are using
- * @used: number of pages used
+ * @errp: pointer to an error
  */
-static int zlib_send_prepare(MultiFDSendParams *p, uint32_t used, Error **errp)
+static int zlib_send_prepare(MultiFDSendParams *p, Error **errp)
 {
-    struct iovec *iov = p->pages->iov;
     struct zlib_data *z = p->data;
+    size_t page_size = qemu_target_page_size();
     z_stream *zs = &z->zs;
     uint32_t out_size = 0;
     int ret;
     uint32_t i;
 
-    for (i = 0; i < used; i++) {
+    for (i = 0; i < p->pages->num; i++) {
         uint32_t available = z->zbuff_len - out_size;
         int flush = Z_NO_FLUSH;
 
-        if (i == used - 1) {
+        if (i == p->pages->num - 1) {
             flush = Z_SYNC_FLUSH;
         }
 
-        zs->avail_in = iov[i].iov_len;
-        zs->next_in = iov[i].iov_base;
+        zs->avail_in = page_size;
+        zs->next_in = p->pages->block->host + p->pages->offset[i];
 
         zs->avail_out = available;
         zs->next_out = z->zbuff + out_size;
@@ -180,7 +180,6 @@ static int zlib_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
  */
 static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
 {
-    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     struct zlib_data *z = g_malloc0(sizeof(struct zlib_data));
     z_stream *zs = &z->zs;
 
@@ -194,10 +193,8 @@ static int zlib_recv_setup(MultiFDRecvParams *p, Error **errp)
         error_setg(errp, "multifd %d: inflate init failed", p->id);
         return -1;
     }
-    /* We will never have more than page_count pages */
-    z->zbuff_len = page_count * qemu_target_page_size();
-    /* We know compression "could" use more space */
-    z->zbuff_len *= 2;
+    /* To be safe, we reserve twice the size of the packet */
+    z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
     z->zbuff = g_try_malloc(z->zbuff_len);
     if (!z->zbuff) {
         inflateEnd(zs);
@@ -234,17 +231,17 @@ static void zlib_recv_cleanup(MultiFDRecvParams *p)
  * Returns 0 for success or -1 for error
  *
  * @p: Params for the channel that we are using
- * @used: number of pages used
  * @errp: pointer to an error
  */
-static int zlib_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
+static int zlib_recv_pages(MultiFDRecvParams *p, Error **errp)
 {
     struct zlib_data *z = p->data;
+    size_t page_size = qemu_target_page_size();
     z_stream *zs = &z->zs;
     uint32_t in_size = p->next_packet_size;
     /* we measure the change of total_out */
     uint32_t out_size = zs->total_out;
-    uint32_t expected_size = used * qemu_target_page_size();
+    uint32_t expected_size = p->pages->num * qemu_target_page_size();
     uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
     int ret;
     int i;
@@ -263,17 +260,16 @@ static int zlib_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
     zs->avail_in = in_size;
     zs->next_in = z->zbuff;
 
-    for (i = 0; i < used; i++) {
-        struct iovec *iov = &p->pages->iov[i];
+    for (i = 0; i < p->pages->num; i++) {
         int flush = Z_NO_FLUSH;
         unsigned long start = zs->total_out;
 
-        if (i == used - 1) {
+        if (i == p->pages->num - 1) {
             flush = Z_SYNC_FLUSH;
         }
 
-        zs->avail_out = iov->iov_len;
-        zs->next_out = iov->iov_base;
+        zs->avail_out = page_size;
+        zs->next_out = p->pages->block->host + p->pages->offset[i];
 
         /*
          * Welcome to inflate semantics
@@ -286,8 +282,8 @@ static int zlib_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
         do {
             ret = inflate(zs, flush);
         } while (ret == Z_OK && zs->avail_in
-                 && (zs->total_out - start) < iov->iov_len);
-        if (ret == Z_OK && (zs->total_out - start) < iov->iov_len) {
+                 && (zs->total_out - start) < page_size);
+        if (ret == Z_OK && (zs->total_out - start) < page_size) {
             error_setg(errp, "multifd %d: inflate generated too few output",
                        p->id);
             return -1;
diff --git a/migration/multifd-zstd.c b/migration/multifd-zstd.c
index 693bddf..2d5b611 100644
--- a/migration/multifd-zstd.c
+++ b/migration/multifd-zstd.c
@@ -13,6 +13,7 @@
 #include "qemu/osdep.h"
 #include <zstd.h>
 #include "qemu/rcu.h"
+#include "exec/ramblock.h"
 #include "exec/target_page.h"
 #include "qapi/error.h"
 #include "migration.h"
@@ -47,7 +48,6 @@ struct zstd_data {
  */
 static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
 {
-    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     struct zstd_data *z = g_new0(struct zstd_data, 1);
     int res;
 
@@ -67,9 +67,8 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
                    p->id, ZSTD_getErrorName(res));
         return -1;
     }
-    /* We will never have more than page_count pages */
-    z->zbuff_len = page_count * qemu_target_page_size();
-    z->zbuff_len *= 2;
+    /* To be safe, we reserve twice the size of the packet */
+    z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
     z->zbuff = g_try_malloc(z->zbuff_len);
     if (!z->zbuff) {
         ZSTD_freeCStream(z->zcs);
@@ -86,6 +85,7 @@ static int zstd_send_setup(MultiFDSendParams *p, Error **errp)
  * Close the channel and return memory.
  *
  * @p: Params for the channel that we are using
+ * @errp: pointer to an error
  */
 static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp)
 {
@@ -108,12 +108,12 @@ static void zstd_send_cleanup(MultiFDSendParams *p, Error **errp)
  * Returns 0 for success or -1 for error
  *
  * @p: Params for the channel that we are using
- * @used: number of pages used
+ * @errp: pointer to an error
  */
-static int zstd_send_prepare(MultiFDSendParams *p, uint32_t used, Error **errp)
+static int zstd_send_prepare(MultiFDSendParams *p, Error **errp)
 {
-    struct iovec *iov = p->pages->iov;
     struct zstd_data *z = p->data;
+    size_t page_size = qemu_target_page_size();
     int ret;
     uint32_t i;
 
@@ -121,14 +121,14 @@ static int zstd_send_prepare(MultiFDSendParams *p, uint32_t used, Error **errp)
     z->out.size = z->zbuff_len;
     z->out.pos = 0;
 
-    for (i = 0; i < used; i++) {
+    for (i = 0; i < p->pages->num; i++) {
         ZSTD_EndDirective flush = ZSTD_e_continue;
 
-        if (i == used - 1) {
+        if (i == p->pages->num - 1) {
             flush = ZSTD_e_flush;
         }
-        z->in.src = iov[i].iov_base;
-        z->in.size = iov[i].iov_len;
+        z->in.src = p->pages->block->host + p->pages->offset[i];
+        z->in.size = page_size;
         z->in.pos = 0;
 
         /*
@@ -191,7 +191,6 @@ static int zstd_send_write(MultiFDSendParams *p, uint32_t used, Error **errp)
  */
 static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
 {
-    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
     struct zstd_data *z = g_new0(struct zstd_data, 1);
     int ret;
 
@@ -212,10 +211,8 @@ static int zstd_recv_setup(MultiFDRecvParams *p, Error **errp)
         return -1;
     }
 
-    /* We will never have more than page_count pages */
-    z->zbuff_len = page_count * qemu_target_page_size();
-    /* We know compression "could" use more space */
-    z->zbuff_len *= 2;
+    /* To be safe, we reserve twice the size of the packet */
+    z->zbuff_len = MULTIFD_PACKET_SIZE * 2;
     z->zbuff = g_try_malloc(z->zbuff_len);
     if (!z->zbuff) {
         ZSTD_freeDStream(z->zds);
@@ -254,14 +251,14 @@ static void zstd_recv_cleanup(MultiFDRecvParams *p)
  * Returns 0 for success or -1 for error
  *
  * @p: Params for the channel that we are using
- * @used: number of pages used
  * @errp: pointer to an error
  */
-static int zstd_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
+static int zstd_recv_pages(MultiFDRecvParams *p, Error **errp)
 {
     uint32_t in_size = p->next_packet_size;
     uint32_t out_size = 0;
-    uint32_t expected_size = used * qemu_target_page_size();
+    size_t page_size = qemu_target_page_size();
+    uint32_t expected_size = p->pages->num * page_size;
     uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
     struct zstd_data *z = p->data;
     int ret;
@@ -282,11 +279,9 @@ static int zstd_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
     z->in.size = in_size;
     z->in.pos = 0;
 
-    for (i = 0; i < used; i++) {
-        struct iovec *iov = &p->pages->iov[i];
-
-        z->out.dst = iov->iov_base;
-        z->out.size = iov->iov_len;
+    for (i = 0; i < p->pages->num; i++) {
+        z->out.dst = p->pages->block->host + p->pages->offset[i];
+        z->out.size = page_size;
         z->out.pos = 0;
 
         /*
@@ -300,8 +295,8 @@ static int zstd_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
         do {
             ret = ZSTD_decompressStream(z->zds, &z->out, &z->in);
         } while (ret > 0 && (z->in.size - z->in.pos > 0)
-                 && (z->out.pos < iov->iov_len));
-        if (ret > 0 && (z->out.pos < iov->iov_len)) {
+                 && (z->out.pos < page_size));
+        if (ret > 0 && (z->out.pos < page_size)) {
             error_setg(errp, "multifd %d: decompressStream buffer too small",
                        p->id);
             return -1;
diff --git a/migration/multifd.c b/migration/multifd.c
index 7c9deb1..3242f68 100644
--- a/migration/multifd.c
+++ b/migration/multifd.c
@@ -66,6 +66,7 @@ static int nocomp_send_setup(MultiFDSendParams *p, Error **errp)
  * For no compression this function does nothing.
  *
  * @p: Params for the channel that we are using
+ * @errp: pointer to an error
  */
 static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
 {
@@ -81,13 +82,11 @@ static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
  * Returns 0 for success or -1 for error
  *
  * @p: Params for the channel that we are using
- * @used: number of pages used
  * @errp: pointer to an error
  */
-static int nocomp_send_prepare(MultiFDSendParams *p, uint32_t used,
-                               Error **errp)
+static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
 {
-    p->next_packet_size = used * qemu_target_page_size();
+    p->next_packet_size = p->pages->num * qemu_target_page_size();
     p->flags |= MULTIFD_FLAG_NOCOMP;
     return 0;
 }
@@ -142,10 +141,9 @@ static void nocomp_recv_cleanup(MultiFDRecvParams *p)
  * Returns 0 for success or -1 for error
  *
  * @p: Params for the channel that we are using
- * @used: number of pages used
 * @errp: pointer to an error
  */
-static int nocomp_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
+static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
 {
     uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;
 
@@ -154,7 +152,7 @@ static int nocomp_recv_pages(MultiFDRecvParams *p, uint32_t used, Error **errp)
                    p->id, flags, MULTIFD_FLAG_NOCOMP);
         return -1;
     }
-    return qio_channel_readv_all(p->c, p->pages->iov, used, errp);
+    return qio_channel_readv_all(p->c, p->pages->iov, p->pages->num, errp);
 }
 
 static MultiFDMethods multifd_nocomp_ops = {
@@ -252,7 +250,7 @@ static MultiFDPages_t *multifd_pages_init(size_t size)
 
 static void multifd_pages_clear(MultiFDPages_t *pages)
 {
-    pages->used = 0;
+    pages->num = 0;
     pages->allocated = 0;
     pages->packet_num = 0;
     pages->block = NULL;
@@ -270,7 +268,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
 
     packet->flags = cpu_to_be32(p->flags);
     packet->pages_alloc = cpu_to_be32(p->pages->allocated);
-    packet->pages_used = cpu_to_be32(p->pages->used);
+    packet->pages_used = cpu_to_be32(p->pages->num);
     packet->next_packet_size = cpu_to_be32(p->next_packet_size);
     packet->packet_num = cpu_to_be64(p->packet_num);
 
@@ -278,7 +276,7 @@ static void multifd_send_fill_packet(MultiFDSendParams *p)
         strncpy(packet->ramblock, p->pages->block->idstr, 256);
     }
 
-    for (i = 0; i < p->pages->used; i++) {
+    for (i = 0; i < p->pages->num; i++) {
         /* there are architectures where ram_addr_t is 32 bit */
         uint64_t temp = p->pages->offset[i];
 
@@ -289,7 +287,8 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
 {
     MultiFDPacket_t *packet = p->packet;
-    uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
+    size_t page_size = qemu_target_page_size();
+    uint32_t pages_max = MULTIFD_PACKET_SIZE / page_size;
     RAMBlock *block;
     int i;
 
@@ -331,18 +330,18 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
         p->pages = multifd_pages_init(packet->pages_alloc);
     }
 
-    p->pages->used = be32_to_cpu(packet->pages_used);
-    if (p->pages->used > packet->pages_alloc) {
+    p->pages->num = be32_to_cpu(packet->pages_used);
+    if (p->pages->num > packet->pages_alloc) {
         error_setg(errp, "multifd: received packet "
                    "with %d pages and expected maximum pages are %d",
-                   p->pages->used, packet->pages_alloc) ;
+                   p->pages->num, packet->pages_alloc) ;
         return -1;
     }
 
     p->next_packet_size = be32_to_cpu(packet->next_packet_size);
     p->packet_num = be64_to_cpu(packet->packet_num);
 
-    if (p->pages->used == 0) {
+    if (p->pages->num == 0) {
         return 0;
     }
 
@@ -355,17 +354,19 @@ static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
         return -1;
     }
 
-    for (i = 0; i < p->pages->used; i++) {
+    p->pages->block = block;
+    for (i = 0; i < p->pages->num; i++) {
         uint64_t offset = be64_to_cpu(packet->offset[i]);
 
-        if (offset > (block->used_length - qemu_target_page_size())) {
+        if (offset > (block->used_length - page_size)) {
             error_setg(errp, "multifd: offset too long %" PRIu64
                        " (max " RAM_ADDR_FMT ")",
                        offset, block->used_length);
             return -1;
         }
+        p->pages->offset[i] = offset;
         p->pages->iov[i].iov_base = block->host + offset;
-        p->pages->iov[i].iov_len = qemu_target_page_size();
+        p->pages->iov[i].iov_len = page_size;
     }
 
     return 0;
@@ -442,13 +443,13 @@ static int multifd_send_pages(QEMUFile *f)
         }
         qemu_mutex_unlock(&p->mutex);
     }
-    assert(!p->pages->used);
+    assert(!p->pages->num);
     assert(!p->pages->block);
 
     p->packet_num = multifd_send_state->packet_num++;
     multifd_send_state->pages = p->pages;
     p->pages = pages;
-    transferred = ((uint64_t) pages->used) * qemu_target_page_size()
+    transferred = ((uint64_t) pages->num) * qemu_target_page_size()
                 + p->packet_len;
     qemu_file_update_transfer(f, transferred);
     ram_counters.multifd_bytes += transferred;
@@ -468,12 +469,12 @@ int multifd_queue_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
     }
 
     if (pages->block == block) {
-        pages->offset[pages->used] = offset;
-        pages->iov[pages->used].iov_base = block->host + offset;
-        pages->iov[pages->used].iov_len = qemu_target_page_size();
-        pages->used++;
+        pages->offset[pages->num] = offset;
+        pages->iov[pages->num].iov_base = block->host + offset;
+        pages->iov[pages->num].iov_len = qemu_target_page_size();
+        pages->num++;
 
-        if (pages->used < pages->allocated) {
+        if (pages->num < pages->allocated) {
             return 1;
         }
     }
@@ -523,6 +524,9 @@ static void multifd_send_terminate_threads(Error *err)
         qemu_mutex_lock(&p->mutex);
         p->quit = true;
        qemu_sem_post(&p->sem);
+        if (p->c) {
+            qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
+        }
         qemu_mutex_unlock(&p->mutex);
     }
 }
@@ -585,7 +589,7 @@ void multifd_send_sync_main(QEMUFile *f)
     if (!migrate_use_multifd()) {
         return;
     }
-    if (multifd_send_state->pages->used) {
+    if (multifd_send_state->pages->num) {
         if (multifd_send_pages(f) < 0) {
             error_report("%s: multifd_send_pages fail", __func__);
             return;
@@ -627,7 +631,6 @@ static void *multifd_send_thread(void *opaque)
     MultiFDSendParams *p = opaque;
     Error *local_err = NULL;
     int ret = 0;
-    uint32_t flags = 0;
 
     trace_multifd_send_thread_start(p->id);
     rcu_register_thread();
@@ -648,13 +651,12 @@ static void *multifd_send_thread(void *opaque)
         qemu_mutex_lock(&p->mutex);
 
         if (p->pending_job) {
-            uint32_t used = p->pages->used;
+            uint32_t used = p->pages->num;
             uint64_t packet_num = p->packet_num;
-            flags = p->flags;
+            uint32_t flags = p->flags;
 
             if (used) {
-                ret = multifd_send_state->ops->send_prepare(p, used,
-                                                            &local_err);
+                ret = multifd_send_state->ops->send_prepare(p, &local_err);
                 if (ret != 0) {
                     qemu_mutex_unlock(&p->mutex);
                     break;
@@ -664,7 +666,7 @@ static void *multifd_send_thread(void *opaque)
             p->flags = 0;
             p->num_packets++;
             p->num_pages += used;
-            p->pages->used = 0;
+            p->pages->num = 0;
             p->pages->block = NULL;
             qemu_mutex_unlock(&p->mutex);
 
@@ -1090,7 +1092,7 @@ static void *multifd_recv_thread(void *opaque)
             break;
         }
 
-        used = p->pages->used;
+        used = p->pages->num;
         flags = p->flags;
         /* recv methods don't know how to handle the SYNC flag */
         p->flags &= ~MULTIFD_FLAG_SYNC;
@@ -1101,7 +1103,7 @@ static void *multifd_recv_thread(void *opaque)
         qemu_mutex_unlock(&p->mutex);
 
         if (used) {
-            ret = multifd_recv_state->ops->recv_pages(p, used, &local_err);
+            ret = multifd_recv_state->ops->recv_pages(p, &local_err);
             if (ret != 0) {
                 break;
             }
diff --git a/migration/multifd.h b/migration/multifd.h
index 15c50ca..e57adc7 100644
--- a/migration/multifd.h
+++ b/migration/multifd.h
@@ -55,7 +55,7 @@ typedef struct {
 
 typedef struct {
     /* number of used pages */
-    uint32_t used;
+    uint32_t num;
     /* number of allocated pages */
     uint32_t allocated;
     /* global number of generated multifd packets */
@@ -159,7 +159,7 @@ typedef struct {
     /* Cleanup for sending side */
     void (*send_cleanup)(MultiFDSendParams *p, Error **errp);
     /* Prepare the send packet */
-    int (*send_prepare)(MultiFDSendParams *p, uint32_t used, Error **errp);
+    int (*send_prepare)(MultiFDSendParams *p, Error **errp);
     /* Write the send packet */
     int (*send_write)(MultiFDSendParams *p, uint32_t used, Error **errp);
     /* Setup for receiving side */
@@ -167,7 +167,7 @@ typedef struct {
     /* Cleanup for receiving side */
     void (*recv_cleanup)(MultiFDRecvParams *p);
     /* Read all pages */
-    int (*recv_pages)(MultiFDRecvParams *p, uint32_t used, Error **errp);
+    int (*recv_pages)(MultiFDRecvParams *p, Error **errp);
 } MultiFDMethods;
 
 void multifd_register_ops(int method, MultiFDMethods *ops);
diff --git a/migration/ram.c b/migration/ram.c
index 863035d..57efa67 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -81,11 +81,6 @@
 /* 0x80 is reserved in migration.h start with 0x100 next */
 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
 
-static inline bool is_zero_range(uint8_t *p, uint64_t size)
-{
-    return buffer_is_zero(p, size);
-}
-
 XBZRLECacheStats xbzrle_counters;
 
 /* struct contains XBZRLE cache and a static page
@@ -1180,7 +1175,7 @@ static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
     uint8_t *p = block->host + offset;
     int len = 0;
 
-    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
+    if (buffer_is_zero(p, TARGET_PAGE_SIZE)) {
         len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
         qemu_put_byte(file, 0);
         len += 1;
@@ -3367,7 +3362,7 @@ static inline void *colo_cache_from_block_offset(RAMBlock *block,
  */
 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
 {
-    if (ch != 0 || !is_zero_range(host, size)) {
+    if (ch != 0 || !buffer_is_zero(host, size)) {
         memset(host, ch, size);
     }
 }
@@ -3918,7 +3913,6 @@ void colo_flush_ram_cache(void)
     unsigned long offset = 0;
 
     memory_global_dirty_log_sync();
-    qemu_mutex_lock(&ram_state->bitmap_mutex);
     WITH_RCU_READ_LOCK_GUARD() {
         RAMBLOCK_FOREACH_NOT_IGNORED(block) {
             ramblock_sync_dirty_bitmap(ram_state, block);
@@ -3954,7 +3948,6 @@ void colo_flush_ram_cache(void)
         }
     }
     trace_colo_flush_ram_cache_end();
-    qemu_mutex_unlock(&ram_state->bitmap_mutex);
 }
 
 /**
diff --git a/migration/savevm.c b/migration/savevm.c
index d59e976..0bef031 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1685,6 +1685,7 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis,
 {
     PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE);
     uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps;
+    size_t page_size = qemu_target_page_size();
     Error *local_err = NULL;
 
     trace_loadvm_postcopy_handle_advise();
@@ -1741,13 +1742,13 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis,
     }
 
     remote_tps = qemu_get_be64(mis->from_src_file);
-    if (remote_tps != qemu_target_page_size()) {
+    if (remote_tps != page_size) {
         /*
          * Again, some differences could be dealt with, but for now keep it
          * simple.
         */
         error_report("Postcopy needs matching target page sizes (s=%d d=%zd)",
-                     (int)remote_tps, qemu_target_page_size());
+                     (int)remote_tps, page_size);
         return -1;
     }