author | Juan Quintela <quintela@redhat.com> | 2019-12-18 05:36:22 +0100 |
---|---|---|
committer | Juan Quintela <quintela@redhat.com> | 2020-01-20 09:10:22 +0100 |
commit | 4d65a6216bfc44891ac298b74a6921d479805131 | |
tree | d4011ad42feb035f8f7b2700f378ff6e45d1a1ff /migration/ram.c | |
parent | b99784ef6c3beff2c43ea8d3bc50cdcd7903658f | |
migration: Make sure that we don't call write() in case of error
If we are already exiting due to an error, a finished migration, or similar,
don't even try to touch the channel with a single I/O operation.
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
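The guard the patch adds is easy to show in isolation: check an atomic "exiting" flag before issuing any I/O, and have the error path set that flag. Below is a minimal, self-contained sketch of that pattern using C11 <stdatomic.h> and a plain write() on stdout instead of QEMU's atomic_read()/atomic_set() macros and its channel layer; SendState, send_if_running() and the main() driver are illustrative names, not QEMU code.

```c
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for the multifd send state. */
typedef struct {
    atomic_int exiting;   /* 0 = running, 1 = shutting down */
    int fd;               /* channel file descriptor */
} SendState;

static ssize_t send_if_running(SendState *s, const void *buf, size_t len)
{
    /* Refuse to touch the channel once shutdown has started. */
    if (atomic_load(&s->exiting)) {
        return -1;
    }
    return write(s->fd, buf, len);
}

int main(void)
{
    SendState s = { .fd = STDOUT_FILENO };   /* exiting starts at 0 */

    send_if_running(&s, "hello\n", 6);       /* reaches write() */
    atomic_store(&s.exiting, 1);             /* an error path flips the flag */
    if (send_if_running(&s, "late\n", 5) < 0) {
        fprintf(stderr, "skipped: already exiting\n");
    }
    return 0;
}
```

Once the flag is set, every later send attempt fails fast without ever reaching write(), which is the property the commit message asks for.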
Diffstat (limited to 'migration/ram.c')
-rw-r--r-- | migration/ram.c | 25 |
1 file changed, 25 insertions, 0 deletions
diff --git a/migration/ram.c b/migration/ram.c
index b9147bc..f946282 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -900,6 +900,12 @@ struct {
     uint64_t packet_num;
     /* send channels ready */
    QemuSemaphore channels_ready;
+    /*
+     * Have we already run terminate threads. There is a race when it
+     * happens that we got one error while we are exiting.
+     * We will use atomic operations. Only valid values are 0 and 1.
+     */
+    int exiting;
 } *multifd_send_state;
 
 /*
@@ -928,6 +934,10 @@ static int multifd_send_pages(RAMState *rs)
     MultiFDPages_t *pages = multifd_send_state->pages;
     uint64_t transferred;
 
+    if (atomic_read(&multifd_send_state->exiting)) {
+        return -1;
+    }
+
     qemu_sem_wait(&multifd_send_state->channels_ready);
     for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
         p = &multifd_send_state->params[i];
@@ -1009,6 +1019,16 @@ static void multifd_send_terminate_threads(Error *err)
         }
     }
 
+    /*
+     * We don't want to exit each threads twice. Depending on where
+     * we get the error, or if there are two independent errors in two
+     * threads at the same time, we can end calling this function
+     * twice.
+     */
+    if (atomic_xchg(&multifd_send_state->exiting, 1)) {
+        return;
+    }
+
     for (i = 0; i < migrate_multifd_channels(); i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
 
@@ -1118,6 +1138,10 @@ static void *multifd_send_thread(void *opaque)
 
     while (true) {
         qemu_sem_wait(&p->sem);
+
+        if (atomic_read(&multifd_send_state->exiting)) {
+            break;
+        }
         qemu_mutex_lock(&p->mutex);
 
         if (p->pending_job) {
@@ -1224,6 +1248,7 @@ int multifd_save_setup(void)
     multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
     multifd_send_state->pages = multifd_pages_init(page_count);
     qemu_sem_init(&multifd_send_state->channels_ready, 0);
+    atomic_set(&multifd_send_state->exiting, 0);
     for (i = 0; i < thread_count; i++) {
         MultiFDSendParams *p = &multifd_send_state->params[i];
 
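The atomic_xchg() hunk in multifd_send_terminate_threads() is what makes the teardown run exactly once even if two send threads hit independent errors at the same moment. Here is a minimal sketch of that once-only pattern, assuming C11 <stdatomic.h> and POSIX threads rather than QEMU's atomic macros and thread wrappers; terminate_threads(), worker() and the counters are hypothetical names used only for illustration.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int exiting;    /* 0 until the first terminator wins */
static atomic_int teardowns;  /* counts how many times teardown really ran */

static void terminate_threads(void)
{
    /*
     * atomic_exchange() sets the flag and returns the previous value in
     * one step, so only the first caller sees 0 and does the teardown.
     */
    if (atomic_exchange(&exiting, 1)) {
        return;
    }
    atomic_fetch_add(&teardowns, 1);   /* stands in for the real cleanup */
}

static void *worker(void *arg)
{
    (void)arg;
    terminate_threads();   /* pretend this thread just hit an error */
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, worker, NULL);
    pthread_create(&t2, NULL, worker, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("teardown ran %d time(s)\n", atomic_load(&teardowns));   /* 1 */
    return 0;
}
```

Because the test and the set are a single indivisible exchange, there is no window in which two racing callers both observe "not exiting"; the loser of the race returns immediately and never repeats the cleanup.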