Diffstat (limited to 'migration/savevm.c')
-rw-r--r-- | migration/savevm.c | 452
1 file changed, 318 insertions, 134 deletions
diff --git a/migration/savevm.c b/migration/savevm.c
index deb5783..bb04a45 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -37,6 +37,7 @@
 #include "migration/register.h"
 #include "migration/global_state.h"
 #include "migration/channel-block.h"
+#include "multifd.h"
 #include "ram.h"
 #include "qemu-file.h"
 #include "savevm.h"
@@ -46,27 +47,29 @@
 #include "qapi/clone-visitor.h"
 #include "qapi/qapi-builtin-visit.h"
 #include "qemu/error-report.h"
-#include "sysemu/cpus.h"
-#include "exec/memory.h"
+#include "system/cpus.h"
+#include "system/memory.h"
 #include "exec/target_page.h"
+#include "exec/page-vary.h"
 #include "trace.h"
 #include "qemu/iov.h"
 #include "qemu/job.h"
 #include "qemu/main-loop.h"
 #include "block/snapshot.h"
+#include "block/thread-pool.h"
 #include "qemu/cutils.h"
 #include "io/channel-buffer.h"
 #include "io/channel-file.h"
-#include "sysemu/replay.h"
-#include "sysemu/runstate.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/xen.h"
+#include "system/replay.h"
+#include "system/runstate.h"
+#include "system/system.h"
+#include "system/xen.h"
 #include "migration/colo.h"
 #include "qemu/bitmap.h"
 #include "net/announce.h"
 #include "qemu/yank.h"
 #include "yank_functions.h"
-#include "sysemu/qtest.h"
+#include "system/qtest.h"
 #include "options.h"
 
 const unsigned int postcopy_ram_discard_version;
@@ -90,6 +93,7 @@ enum qemu_vm_cmd {
     MIG_CMD_ENABLE_COLO,       /* Enable COLO */
     MIG_CMD_POSTCOPY_RESUME,   /* resume postcopy on dest */
     MIG_CMD_RECV_BITMAP,       /* Request for recved bitmap on dst */
+    MIG_CMD_SWITCHOVER_START,  /* Switchover start notification */
     MIG_CMD_MAX
 };
 
@@ -109,6 +113,7 @@ static struct mig_cmd_args {
     [MIG_CMD_POSTCOPY_RESUME]  = { .len =  0, .name = "POSTCOPY_RESUME" },
     [MIG_CMD_PACKAGED]         = { .len =  4, .name = "PACKAGED" },
     [MIG_CMD_RECV_BITMAP]      = { .len = -1, .name = "RECV_BITMAP" },
+    [MIG_CMD_SWITCHOVER_START] = { .len =  0, .name = "SWITCHOVER_START" },
     [MIG_CMD_MAX]              = { .len = -1, .name = "MAX" },
 };
 
@@ -130,6 +135,35 @@ static struct mig_cmd_args {
  */
 
 /***********************************************************/
+/* Optional load threads pool support */
+
+static void qemu_loadvm_thread_pool_create(MigrationIncomingState *mis)
+{
+    assert(!mis->load_threads);
+    mis->load_threads = thread_pool_new();
+    mis->load_threads_abort = false;
+}
+
+static void qemu_loadvm_thread_pool_destroy(MigrationIncomingState *mis)
+{
+    qatomic_set(&mis->load_threads_abort, true);
+
+    bql_unlock(); /* Load threads might be waiting for BQL */
+    g_clear_pointer(&mis->load_threads, thread_pool_free);
+    bql_lock();
+}
+
+static bool qemu_loadvm_thread_pool_wait(MigrationState *s,
+                                         MigrationIncomingState *mis)
+{
+    bql_unlock(); /* Let load threads do work requiring BQL */
+    thread_pool_wait(mis->load_threads);
+    bql_lock();
+
+    return !migrate_has_error(s);
+}
+
+/***********************************************************/
 /* savevm/loadvm support */
 
 static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
@@ -232,7 +266,7 @@ typedef struct SaveState {
 
 static SaveState savevm_state = {
     .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers),
-    .handler_pri_head = { [MIG_PRI_DEFAULT ... MIG_PRI_MAX] = NULL },
+    .handler_pri_head = { [0 ... MIG_PRI_MAX] = NULL },
     .global_section_id = 0,
 };
 
@@ -306,7 +340,7 @@ static int configuration_pre_load(void *opaque)
      * predates the variable-target-page-bits support and is using the
      * minimum possible value for this CPU.
      */
-    state->target_page_bits = qemu_target_page_bits_min();
+    state->target_page_bits = migration_legacy_page_bits();
     return 0;
 }
 
@@ -429,8 +463,7 @@ static const VMStateInfo vmstate_info_capability = {
  */
 static bool vmstate_target_page_bits_needed(void *opaque)
 {
-    return qemu_target_page_bits()
-        > qemu_target_page_bits_min();
+    return qemu_target_page_bits() > migration_legacy_page_bits();
 }
 
 static const VMStateDescription vmstate_target_page_bits = {
@@ -704,7 +737,7 @@ static int calculate_compat_instance_id(const char *idstr)
 
 static inline MigrationPriority save_state_priority(SaveStateEntry *se)
 {
-    if (se->vmsd) {
+    if (se->vmsd && se->vmsd->priority) {
         return se->vmsd->priority;
     }
     return MIG_PRI_DEFAULT;
@@ -860,23 +893,6 @@ static void vmstate_check(const VMStateDescription *vmsd)
     }
 }
 
-/*
- * See comment in hw/intc/xics.c:icp_realize()
- *
- * This function can be removed when
- * pre_2_10_vmstate_register_dummy_icp() is removed.
- */
-int vmstate_replace_hack_for_ppc(VMStateIf *obj, int instance_id,
-                                 const VMStateDescription *vmsd,
-                                 void *opaque)
-{
-    SaveStateEntry *se = find_se(vmsd->name, instance_id);
-
-    if (se) {
-        savevm_state_handler_remove(se);
-    }
-    return vmstate_register(obj, instance_id, vmsd, opaque);
-}
 
 int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
                                    const VMStateDescription *vmsd,
@@ -1218,6 +1234,19 @@ void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name)
     qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf);
 }
 
+static void qemu_savevm_send_switchover_start(QEMUFile *f)
+{
+    trace_savevm_send_switchover_start();
+    qemu_savevm_command_send(f, MIG_CMD_SWITCHOVER_START, 0, NULL);
+}
+
+void qemu_savevm_maybe_send_switchover_start(QEMUFile *f)
+{
+    if (migrate_send_switchover_start()) {
+        qemu_savevm_send_switchover_start(f);
+    }
+}
+
 bool qemu_savevm_state_blocked(Error **errp)
 {
     SaveStateEntry *se;
@@ -1248,8 +1277,7 @@ void qemu_savevm_non_migratable_list(strList **reasons)
 void qemu_savevm_state_header(QEMUFile *f)
 {
     MigrationState *s = migrate_get_current();
-
-    s->vmdesc = json_writer_new(false);
+    JSONWriter *vmdesc = s->vmdesc;
 
     trace_savevm_state_header();
     qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
@@ -1258,16 +1286,21 @@ void qemu_savevm_state_header(QEMUFile *f)
 
     if (s->send_configuration) {
         qemu_put_byte(f, QEMU_VM_CONFIGURATION);
-        /*
-         * This starts the main json object and is paired with the
-         * json_writer_end_object in
-         * qemu_savevm_state_complete_precopy_non_iterable
-         */
-        json_writer_start_object(s->vmdesc, NULL);
+        if (vmdesc) {
+            /*
+             * This starts the main json object and is paired with the
+             * json_writer_end_object in
+             * qemu_savevm_state_complete_precopy_non_iterable
+             */
+            json_writer_start_object(vmdesc, NULL);
+            json_writer_start_object(vmdesc, "configuration");
+        }
 
-        json_writer_start_object(s->vmdesc, "configuration");
-        vmstate_save_state(f, &vmstate_configuration, &savevm_state, s->vmdesc);
-        json_writer_end_object(s->vmdesc);
+        vmstate_save_state(f, &vmstate_configuration, &savevm_state, vmdesc);
+
+        if (vmdesc) {
+            json_writer_end_object(vmdesc);
+        }
     }
 }
 
@@ -1313,16 +1346,19 @@ int qemu_savevm_state_setup(QEMUFile *f, Error **errp)
 {
     ERRP_GUARD();
     MigrationState *ms = migrate_get_current();
+    JSONWriter *vmdesc = ms->vmdesc;
     SaveStateEntry *se;
     int ret = 0;
 
-    json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size());
-    json_writer_start_array(ms->vmdesc, "devices");
+    if (vmdesc) {
+        json_writer_int64(vmdesc, "page_size", qemu_target_page_size());
+        json_writer_start_array(vmdesc, "devices");
+    }
 
     trace_savevm_state_setup();
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
         if (se->vmsd && se->vmsd->early_setup) {
-            ret = vmstate_save(f, se, ms->vmdesc, errp);
+            ret = vmstate_save(f, se, vmdesc, errp);
             if (ret) {
                 migrate_set_error(ms, *errp);
                 qemu_file_set_error(f, ret);
@@ -1441,11 +1477,11 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
     return all_finished;
 }
 
-static bool should_send_vmdesc(void)
+bool should_send_vmdesc(void)
 {
     MachineState *machine = MACHINE(qdev_get_machine());
-    bool in_postcopy = migration_in_postcopy();
-    return !machine->suppress_vmdesc && !in_postcopy;
+
+    return !machine->suppress_vmdesc;
 }
 
 /*
@@ -1487,12 +1523,62 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
     qemu_fflush(f);
 }
 
-static
+bool qemu_savevm_state_postcopy_prepare(QEMUFile *f, Error **errp)
+{
+    SaveStateEntry *se;
+    bool ret;
+
+    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+        if (!se->ops || !se->ops->save_postcopy_prepare) {
+            continue;
+        }
+
+        if (se->ops->is_active) {
+            if (!se->ops->is_active(se->opaque)) {
+                continue;
+            }
+        }
+
+        trace_savevm_section_start(se->idstr, se->section_id);
+
+        save_section_header(f, se, QEMU_VM_SECTION_PART);
+        ret = se->ops->save_postcopy_prepare(f, se->opaque, errp);
+        save_section_footer(f, se);
+
+        trace_savevm_section_end(se->idstr, se->section_id, ret);
+
+        if (!ret) {
+            assert(*errp);
+            return false;
+        }
+    }
+
+    return true;
+}
+
 int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
 {
     int64_t start_ts_each, end_ts_each;
     SaveStateEntry *se;
     int ret;
+    bool multifd_device_state = multifd_device_state_supported();
+
+    if (multifd_device_state) {
+        QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+            SaveLiveCompletePrecopyThreadHandler hdlr;
+
+            if (!se->ops || (in_postcopy && se->ops->has_postcopy &&
+                             se->ops->has_postcopy(se->opaque)) ||
+                !se->ops->save_live_complete_precopy_thread) {
+                continue;
+            }
+
+            hdlr = se->ops->save_live_complete_precopy_thread;
+            multifd_spawn_device_state_save_thread(hdlr,
+                                                   se->idstr, se->instance_id,
+                                                   se->opaque);
+        }
+    }
 
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
         if (!se->ops ||
@@ -1518,21 +1604,39 @@ int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
         save_section_footer(f, se);
         if (ret < 0) {
             qemu_file_set_error(f, ret);
-            return -1;
+            goto ret_fail_abort_threads;
         }
         end_ts_each = qemu_clock_get_us(QEMU_CLOCK_REALTIME);
         trace_vmstate_downtime_save("iterable", se->idstr, se->instance_id,
                                     end_ts_each - start_ts_each);
     }
 
+    if (multifd_device_state) {
+        if (migrate_has_error(migrate_get_current())) {
+            multifd_abort_device_state_save_threads();
+        }
+
+        if (!multifd_join_device_state_save_threads()) {
+            qemu_file_set_error(f, -EINVAL);
+            return -1;
+        }
+    }
+
     trace_vmstate_downtime_checkpoint("src-iterable-saved");
 
     return 0;
+
+ret_fail_abort_threads:
+    if (multifd_device_state) {
+        multifd_abort_device_state_save_threads();
+        multifd_join_device_state_save_threads();
+    }
+
+    return -1;
 }
 
 int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
-                                                    bool in_postcopy,
-                                                    bool inactivate_disks)
+                                                    bool in_postcopy)
 {
     MigrationState *ms = migrate_get_current();
     int64_t start_ts_each, end_ts_each;
@@ -1542,6 +1646,9 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
     Error *local_err = NULL;
     int ret;
 
+    /* Making sure cpu states are synchronized before saving non-iterable */
+    cpu_synchronize_all_states();
+
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
         if (se->vmsd && se->vmsd->early_setup) {
             /* Already saved during qemu_savevm_state_setup(). */
@@ -1563,76 +1670,42 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
                                     end_ts_each - start_ts_each);
     }
 
-    if (inactivate_disks) {
-        /* Inactivate before sending QEMU_VM_EOF so that the
-         * bdrv_activate_all() on the other end won't fail. */
-        ret = bdrv_inactivate_all();
-        if (ret) {
-            error_setg(&local_err, "%s: bdrv_inactivate_all() failed (%d)",
-                       __func__, ret);
-            migrate_set_error(ms, local_err);
-            error_report_err(local_err);
-            qemu_file_set_error(f, ret);
-            return ret;
-        }
-    }
     if (!in_postcopy) {
         /* Postcopy stream will still be going */
         qemu_put_byte(f, QEMU_VM_EOF);
-    }
 
-    json_writer_end_array(vmdesc);
-    json_writer_end_object(vmdesc);
-    vmdesc_len = strlen(json_writer_get(vmdesc));
+        if (vmdesc) {
+            json_writer_end_array(vmdesc);
+            json_writer_end_object(vmdesc);
+            vmdesc_len = strlen(json_writer_get(vmdesc));
 
-    if (should_send_vmdesc()) {
-        qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
-        qemu_put_be32(f, vmdesc_len);
-        qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len);
+            qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
+            qemu_put_be32(f, vmdesc_len);
+            qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len);
+        }
     }
 
-    /* Free it now to detect any inconsistencies. */
-    json_writer_free(vmdesc);
-    ms->vmdesc = NULL;
-
     trace_vmstate_downtime_checkpoint("src-non-iterable-saved");
 
     return 0;
 }
 
-int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
-                                       bool inactivate_disks)
+int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only)
 {
     int ret;
-    Error *local_err = NULL;
-    bool in_postcopy = migration_in_postcopy();
 
-    if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
-        error_report_err(local_err);
+    ret = qemu_savevm_state_complete_precopy_iterable(f, false);
+    if (ret) {
+        return ret;
     }
 
-    trace_savevm_state_complete_precopy();
-
-    cpu_synchronize_all_states();
-
-    if (!in_postcopy || iterable_only) {
-        ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy);
+    if (!iterable_only) {
+        ret = qemu_savevm_state_complete_precopy_non_iterable(f, false);
         if (ret) {
             return ret;
         }
     }
 
-    if (iterable_only) {
-        goto flush;
-    }
-
-    ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
-                                                          inactivate_disks);
-    if (ret) {
-        return ret;
-    }
-
-flush:
     return qemu_fflush(f);
 }
 
@@ -1730,7 +1803,8 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
 
     ret = qemu_file_get_error(f);
     if (ret == 0) {
-        qemu_savevm_state_complete_precopy(f, false, false);
+        qemu_savevm_maybe_send_switchover_start(f);
+        qemu_savevm_state_complete_precopy(f, false);
         ret = qemu_file_get_error(f);
     }
     if (ret != 0) {
@@ -1756,7 +1830,7 @@ cleanup:
 void qemu_savevm_live_state(QEMUFile *f)
 {
     /* save QEMU_VM_SECTION_END section */
-    qemu_savevm_state_complete_precopy(f, true, false);
+    qemu_savevm_state_complete_precopy(f, true);
     qemu_put_byte(f, QEMU_VM_EOF);
 }
 
@@ -2004,7 +2078,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
     migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                       MIGRATION_STATUS_POSTCOPY_ACTIVE);
 
-    qemu_sem_post(&mis->thread_sync_sem);
+    qemu_event_set(&mis->thread_sync_event);
     trace_postcopy_ram_listen_thread_start();
 
     rcu_register_thread();
@@ -2013,6 +2087,8 @@
      * in qemu_file, and thus we must be blocking now.
      */
     qemu_file_set_blocking(f, true);
+
+    /* TODO: sanity check that only postcopiable data will be loaded here */
     load_res = qemu_loadvm_state_main(f, mis);
 
     /*
@@ -2073,8 +2149,9 @@
      * (If something broke then qemu will have to exit anyway since it's
      * got a bad migration state).
      */
+    bql_lock();
     migration_incoming_state_destroy();
-    qemu_loadvm_state_cleanup();
+    bql_unlock();
 
     rcu_unregister_thread();
     mis->have_listen_thread = false;
@@ -2129,7 +2206,8 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
     }
 
     mis->have_listen_thread = true;
-    postcopy_thread_create(mis, &mis->listen_thread, "mig/dst/listen",
+    postcopy_thread_create(mis, &mis->listen_thread,
+                           MIGRATION_THREAD_DST_LISTEN,
                            postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
 
     trace_loadvm_postcopy_handle_listen("return");
@@ -2138,7 +2216,6 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
 
 static void loadvm_postcopy_handle_run_bh(void *opaque)
 {
-    Error *local_err = NULL;
     MigrationIncomingState *mis = opaque;
 
     trace_vmstate_downtime_checkpoint("dst-postcopy-bh-enter");
@@ -2154,22 +2231,20 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
 
     trace_vmstate_downtime_checkpoint("dst-postcopy-bh-announced");
 
-    /* Make sure all file formats throw away their mutable metadata.
-     * If we get an error here, just don't restart the VM yet. */
-    bdrv_activate_all(&local_err);
-    if (local_err) {
-        error_report_err(local_err);
-        local_err = NULL;
-        autostart = false;
-    }
-
-    trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated");
-
     dirty_bitmap_mig_before_vm_start();
 
     if (autostart) {
-        /* Hold onto your hats, starting the CPU */
-        vm_start();
+        /*
+         * Make sure all file formats throw away their mutable metadata.
+         * If we get an error here, just don't restart the VM yet.
+         */
+        bool success = migration_block_activate(NULL);
+
+        trace_vmstate_downtime_checkpoint("dst-postcopy-bh-cache-invalidated");
+
+        if (success) {
+            vm_start();
+        }
     } else {
         /* leave it paused and let management decide when to start the CPU */
         runstate_set(RUN_STATE_PAUSED);
@@ -2429,6 +2504,26 @@ static int loadvm_process_enable_colo(MigrationIncomingState *mis)
     return ret;
 }
 
+static int loadvm_postcopy_handle_switchover_start(void)
+{
+    SaveStateEntry *se;
+
+    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+        int ret;
+
+        if (!se->ops || !se->ops->switchover_start) {
+            continue;
+        }
+
+        ret = se->ops->switchover_start(se->opaque);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
 /*
  * Process an incoming 'QEMU_VM_COMMAND'
  * 0           just a normal return
@@ -2527,6 +2622,9 @@ static int loadvm_process_command(QEMUFile *f)
 
     case MIG_CMD_ENABLE_COLO:
         return loadvm_process_enable_colo(mis);
+
+    case MIG_CMD_SWITCHOVER_START:
+        return loadvm_postcopy_handle_switchover_start();
     }
 
     return 0;
@@ -2576,8 +2674,7 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
 }
 
 static int
-qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis,
-                               uint8_t type)
+qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type)
 {
     bool trace_downtime = (type == QEMU_VM_SECTION_FULL);
     uint32_t instance_id, version_id, section_id;
@@ -2655,8 +2752,7 @@ qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis,
 }
 
 static int
-qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis,
-                             uint8_t type)
+qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type)
 {
     bool trace_downtime = (type == QEMU_VM_SECTION_END);
     int64_t start_ts, end_ts;
@@ -2732,13 +2828,11 @@ static int qemu_loadvm_state_header(QEMUFile *f)
     if (migrate_get_current()->send_configuration) {
         if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
             error_report("Configuration section missing");
-            qemu_loadvm_state_cleanup();
             return -EINVAL;
         }
         ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
 
         if (ret) {
-            qemu_loadvm_state_cleanup();
             return ret;
         }
     }
@@ -2790,16 +2884,68 @@ static int qemu_loadvm_state_setup(QEMUFile *f, Error **errp)
     return 0;
 }
 
-void qemu_loadvm_state_cleanup(void)
+struct LoadThreadData {
+    MigrationLoadThread function;
+    void *opaque;
+};
+
+static int qemu_loadvm_load_thread(void *thread_opaque)
+{
+    struct LoadThreadData *data = thread_opaque;
+    MigrationIncomingState *mis = migration_incoming_get_current();
+    g_autoptr(Error) local_err = NULL;
+
+    if (!data->function(data->opaque, &mis->load_threads_abort, &local_err)) {
+        MigrationState *s = migrate_get_current();
+
+        /*
+         * Can't set load_threads_abort here since processing of main migration
+         * channel data could still be happening, resulting in launching of new
+         * load threads.
+         */
+
+        assert(local_err);
+
+        /*
+         * In case of multiple load threads failing which thread error
+         * return we end setting is purely arbitrary.
+         */
+        migrate_set_error(s, local_err);
+    }
+
+    return 0;
+}
+
+void qemu_loadvm_start_load_thread(MigrationLoadThread function,
+                                   void *opaque)
+{
+    MigrationIncomingState *mis = migration_incoming_get_current();
+    struct LoadThreadData *data;
+
+    /* We only set it from this thread so it's okay to read it directly */
+    assert(!mis->load_threads_abort);
+
+    data = g_new(struct LoadThreadData, 1);
+    data->function = function;
+    data->opaque = opaque;
+
+    thread_pool_submit_immediate(mis->load_threads, qemu_loadvm_load_thread,
+                                 data, g_free);
+}
+
+void qemu_loadvm_state_cleanup(MigrationIncomingState *mis)
 {
     SaveStateEntry *se;
 
     trace_loadvm_state_cleanup();
+
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
         if (se->ops && se->ops->load_cleanup) {
             se->ops->load_cleanup(se->opaque);
         }
     }
+
+    qemu_loadvm_thread_pool_destroy(mis);
 }
 
 /* Return true if we should continue the migration, or false. */
@@ -2891,14 +3037,14 @@ retry:
         switch (section_type) {
         case QEMU_VM_SECTION_START:
         case QEMU_VM_SECTION_FULL:
-            ret = qemu_loadvm_section_start_full(f, mis, section_type);
+            ret = qemu_loadvm_section_start_full(f, section_type);
            if (ret < 0) {
                 goto out;
             }
             break;
         case QEMU_VM_SECTION_PART:
        case QEMU_VM_SECTION_END:
-            ret = qemu_loadvm_section_part_end(f, mis, section_type);
+            ret = qemu_loadvm_section_part_end(f, section_type);
             if (ret < 0) {
                 goto out;
             }
@@ -2950,6 +3096,7 @@ out:
 
 int qemu_loadvm_state(QEMUFile *f)
 {
+    MigrationState *s = migrate_get_current();
     MigrationIncomingState *mis = migration_incoming_get_current();
     Error *local_err = NULL;
     int ret;
@@ -2959,6 +3106,8 @@ int qemu_loadvm_state(QEMUFile *f)
         return -EINVAL;
     }
 
+    qemu_loadvm_thread_pool_create(mis);
+
     ret = qemu_loadvm_state_header(f);
     if (ret) {
         return ret;
@@ -2981,13 +3130,27 @@ int qemu_loadvm_state(QEMUFile *f)
     trace_qemu_loadvm_state_post_main(ret);
 
     if (mis->have_listen_thread) {
-        /* Listen thread still going, can't clean up yet */
+        /*
+         * Postcopy listen thread still going, don't synchronize the
+         * cpus yet.
+         */
         return ret;
     }
 
+    /* When reaching here, it must be precopy */
     if (ret == 0) {
-        ret = qemu_file_get_error(f);
+        if (migrate_has_error(migrate_get_current()) ||
+            !qemu_loadvm_thread_pool_wait(s, mis)) {
+            ret = -EINVAL;
+        } else {
+            ret = qemu_file_get_error(f);
+        }
     }
 
+    /*
+     * Set this flag unconditionally so we'll catch further attempts to
+     * start additional threads via an appropriate assert()
+     */
+    qatomic_set(&mis->load_threads_abort, true);
     /*
      * Try to read in the VMDESC section as well, so that dumping tools that
@@ -3024,7 +3187,6 @@ int qemu_loadvm_state(QEMUFile *f)
         }
     }
 
-    qemu_loadvm_state_cleanup();
     cpu_synchronize_all_post_init();
 
     return ret;
@@ -3064,6 +3226,29 @@ int qemu_loadvm_approve_switchover(void)
     return migrate_send_rp_switchover_ack(mis);
 }
 
+bool qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id,
+                                   char *buf, size_t len, Error **errp)
+{
+    SaveStateEntry *se;
+
+    se = find_se(idstr, instance_id);
+    if (!se) {
+        error_setg(errp,
+                   "Unknown idstr %s or instance id %u for load state buffer",
+                   idstr, instance_id);
+        return false;
+    }
+
+    if (!se->ops || !se->ops->load_state_buffer) {
+        error_setg(errp,
+                   "idstr %s / instance %u has no load state buffer operation",
+                   idstr, instance_id);
+        return false;
+    }
+
+    return se->ops->load_state_buffer(se->opaque, buf, len, errp);
+}
+
 bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
                    bool has_devices, strList *devices, Error **errp)
 {
@@ -3211,11 +3396,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
      * side of the migration take control of the images.
      */
     if (live && !saved_vm_running) {
-        ret = bdrv_inactivate_all();
-        if (ret) {
-            error_setg(errp, "%s: bdrv_inactivate_all() failed (%d)",
-                       __func__, ret);
-        }
+        migration_block_inactivate();
    }
 }
 
@@ -3286,6 +3467,7 @@ bool load_snapshot(const char *name, const char *vmstate,
     /* Don't even try to load empty VM states */
     ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
     if (ret < 0) {
+        error_setg(errp, "Snapshot can not be found");
         return false;
     } else if (sn.vm_state_size == 0) {
         error_setg(errp, "This is a disk-only snapshot. Revert to it "
@@ -3365,12 +3547,14 @@ void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
     qemu_ram_set_idstr(mr->ram_block,
                        memory_region_name(mr), dev);
     qemu_ram_set_migratable(mr->ram_block);
+    ram_block_add_cpr_blocker(mr->ram_block, &error_fatal);
 }
 
 void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
 {
     qemu_ram_unset_idstr(mr->ram_block);
     qemu_ram_unset_migratable(mr->ram_block);
+    ram_block_del_cpr_blocker(mr->ram_block);
 }
 
 void vmstate_register_ram_global(MemoryRegion *mr)
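
Illustrative only, not part of the diff above: the new qemu_loadvm_start_load_thread() added here queues work on the incoming side's load-thread pool, and qemu_loadvm_load_thread() invokes the callback as function(opaque, &mis->load_threads_abort, &local_err). The sketch below shows how a device might use that entry point; the MyDevState type, the my_dev_*() helpers, and the parameter names are assumptions for illustration, not code from this change.

typedef struct MyDevState MyDevState;                         /* hypothetical */
static bool my_dev_has_more_chunks(MyDevState *s);            /* hypothetical */
static bool my_dev_restore_next_chunk(MyDevState *s, Error **errp);

/* Runs on the load-thread pool, outside the BQL, until switchover */
static bool my_dev_load_thread(void *opaque, bool *should_quit, Error **errp)
{
    MyDevState *s = opaque;

    while (my_dev_has_more_chunks(s)) {
        if (qatomic_read(should_quit)) {
            /* Migration is being torn down; stop without raising an error */
            return true;
        }
        if (!my_dev_restore_next_chunk(s, errp)) {
            /* Failure is propagated via migrate_set_error() by the pool */
            return false;
        }
    }
    return true;
}

static void my_dev_begin_background_load(MyDevState *s)
{
    /* Hand the heavyweight restore work to the incoming load-thread pool */
    qemu_loadvm_start_load_thread(my_dev_load_thread, s);
}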
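Likewise illustrative: the diff consumes two per-device hooks through se->ops, load_state_buffer() (fed by qemu_loadvm_load_state_buffer() for out-of-band device-state packets) and switchover_start() (driven by the new MIG_CMD_SWITCHOVER_START command). A hypothetical device might implement them roughly as sketched below; MyDevState and the my_dev_*() helpers are again assumptions, and the functions would be wired into the device's SaveVMHandlers.

typedef struct MyDevState MyDevState;                         /* hypothetical */
static bool my_dev_queue_incoming_buffer(MyDevState *s, char *buf, size_t len);
static bool my_dev_flush_queued_buffers(MyDevState *s);

static bool my_dev_load_state_buffer(void *opaque, char *buf, size_t len,
                                     Error **errp)
{
    MyDevState *s = opaque;

    if (!my_dev_queue_incoming_buffer(s, buf, len)) {
        error_setg(errp, "my-dev: malformed device-state buffer (%zu bytes)",
                   len);
        return false;
    }
    return true;
}

static int my_dev_switchover_start(void *opaque)
{
    MyDevState *s = opaque;

    /* Source has announced switchover; flush anything still queued */
    return my_dev_flush_queued_buffers(s) ? 0 : -EINVAL;
}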