From 46036b2462c7ff56c0af6466ea6b9248197a38a8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?An=C3=ADbal=20Lim=C3=B3n?=
Date: Thu, 3 Sep 2015 15:48:33 -0500
Subject: cpus.c: qemu_mutex_lock_iothread fix race condition at cpu thread init
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When QEMU starts, the RCU thread can execute qemu_mutex_lock_iothread
and fail with the error "qemu:qemu_cpu_kick_thread: No such process",
causing QEMU to exit.

This does not happen often, but in glibc the thread id can already be
set even though that does not guarantee the thread is in an
active/running state.  Inserting a sleep(1) after the *newthread
assignment [1] makes the issue appear reliably.

So do not assume the thread exists just because first_cpu->thread is
set; instead test first_cpu->created, which is only set from within the
CPU threads themselves (kvm, tcg, dummy).

[1] https://sourceware.org/git/?p=glibc.git;a=blob;f=nptl/pthread_create.c;h=d10f4ea8004e1d8f3a268b95cc0f8d93b8d89867;hb=HEAD#l621

Cc: qemu-stable@nongnu.org
Signed-off-by: Aníbal Limón
Message-Id: <1441313313-3040-1-git-send-email-anibal.limon@linux.intel.com>
Signed-off-by: Paolo Bonzini
---
 cpus.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cpus.c')

diff --git a/cpus.c b/cpus.c
index c1e74d9..e831aa3 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1166,7 +1166,7 @@ void qemu_mutex_lock_iothread(void)
      * TCG code execution.
      */
     if (!tcg_enabled() || qemu_in_vcpu_thread() ||
-        !first_cpu || !first_cpu->thread) {
+        !first_cpu || !first_cpu->created) {
         qemu_mutex_lock(&qemu_global_mutex);
         atomic_dec(&iothread_requesting_mutex);
     } else {
-- 
cgit v1.1


From 9373e63297c43752f9cf085feb7f5aed57d959f8 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 18 Aug 2015 06:24:34 -0700
Subject: tcg: introduce tcg_current_cpu

This is already useful on Windows in order to remove tls.h, because
accesses to current_cpu are done from a different thread on that
platform.  It will be used on POSIX platforms as soon as TCG stops using
signals to interrupt the execution of translated code.

Reviewed-by: Richard Henderson
Signed-off-by: Paolo Bonzini
---
 cpus.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'cpus.c')

diff --git a/cpus.c b/cpus.c
index e831aa3..6cebb7a 100644
--- a/cpus.c
+++ b/cpus.c
@@ -663,8 +663,9 @@ static void cpu_handle_guest_debug(CPUState *cpu)
 
 static void cpu_signal(int sig)
 {
-    if (current_cpu) {
-        cpu_exit(current_cpu);
+    CPUState *cpu = atomic_mb_read(&tcg_current_cpu);
+    if (cpu) {
+        cpu_exit(cpu);
     }
     exit_request = 1;
 }
-- 
cgit v1.1


From aed807c8e2bf009b2c6a35490d4fd4383887221d Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 18 Aug 2015 06:43:15 -0700
Subject: tcg: synchronize exit_request and tcg_current_cpu accesses

Synchronize the remaining pair of accesses in cpu_signal.  These should
be necessary on Windows as well, at least in theory.  SuspendThread and
ResumeThread probably introduce some implicit memory barrier.

Reviewed-by: Richard Henderson
Signed-off-by: Paolo Bonzini
---
 cpus.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'cpus.c')

diff --git a/cpus.c b/cpus.c
index 6cebb7a..dd2fc29 100644
--- a/cpus.c
+++ b/cpus.c
@@ -663,11 +663,15 @@ static void cpu_handle_guest_debug(CPUState *cpu)
 
 static void cpu_signal(int sig)
 {
-    CPUState *cpu = atomic_mb_read(&tcg_current_cpu);
+    CPUState *cpu;
+    /* Ensure whatever caused the exit has reached the CPU threads before
+     * writing exit_request.
+     */
+    atomic_mb_set(&exit_request, 1);
+    cpu = atomic_mb_read(&tcg_current_cpu);
     if (cpu) {
         cpu_exit(cpu);
     }
-    exit_request = 1;
 }
 
 #ifdef CONFIG_LINUX
@@ -1063,7 +1067,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     }
 
     /* process any pending work */
-    exit_request = 1;
+    atomic_mb_set(&exit_request, 1);
 
     while (1) {
         tcg_exec_all();
@@ -1441,7 +1445,9 @@ static void tcg_exec_all(void)
             break;
         }
     }
-    exit_request = 0;
+
+    /* Pairs with smp_wmb in qemu_cpu_kick. */
+    atomic_mb_set(&exit_request, 0);
 }
 
 void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
-- 
cgit v1.1


From 9102dedaa1ee1e89ce4a81283c403ff4928e9ef9 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Tue, 18 Aug 2015 06:52:09 -0700
Subject: use qemu_cpu_kick instead of cpu_exit or qemu_cpu_kick_thread

Use the same API to trigger interruption of a CPU, no matter if under
TCG or KVM.  There is no difference: these calls come from the CPU
thread, so the qemu_cpu_kick calls will send a signal to the running
thread and it will be processed synchronously, just like a call to
cpu_exit.  The only difference is in the overhead, but neither call to
cpu_exit (now qemu_cpu_kick) is in a hot path.

Reviewed-by: Richard Henderson
Signed-off-by: Paolo Bonzini
---
 cpus.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

(limited to 'cpus.c')

diff --git a/cpus.c b/cpus.c
index dd2fc29..e407910 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1090,6 +1090,12 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
 #ifndef _WIN32
     int err;
 
+    if (!tcg_enabled()) {
+        if (cpu->thread_kicked) {
+            return;
+        }
+        cpu->thread_kicked = true;
+    }
     err = pthread_kill(cpu->thread->thread, SIG_IPI);
     if (err) {
         fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
@@ -1127,21 +1133,14 @@
 void qemu_cpu_kick(CPUState *cpu)
 {
     qemu_cond_broadcast(cpu->halt_cond);
-    if (!tcg_enabled() && !cpu->thread_kicked) {
-        qemu_cpu_kick_thread(cpu);
-        cpu->thread_kicked = true;
-    }
+    qemu_cpu_kick_thread(cpu);
 }
 
 void qemu_cpu_kick_self(void)
 {
 #ifndef _WIN32
     assert(current_cpu);
-
-    if (!current_cpu->thread_kicked) {
-        qemu_cpu_kick_thread(current_cpu);
-        current_cpu->thread_kicked = true;
-    }
+    qemu_cpu_kick_thread(current_cpu);
 #else
     abort();
 #endif
-- 
cgit v1.1


From e0c382113f768cc375a0d61b7cb3692f1b4bba58 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Wed, 26 Aug 2015 00:19:19 +0200
Subject: tcg: signal-free qemu_cpu_kick

Signals are slow and do not exist on Win32.  The previous patches have
done most of the legwork to introduce memory barriers (some of them
were even there already for the sake of Windows!) and we can now set
the flags directly in the iothread.

qemu_cpu_kick_thread is not used anymore on TCG, since the TCG thread
is never outside usermode while the CPU is running (not halted).
Instead run the content of the signal handler (now in
qemu_cpu_kick_no_halt) directly.

qemu_cpu_kick_no_halt is also used in qemu_mutex_lock_iothread to avoid
the overhead of qemu_cond_broadcast.

Reviewed-by: Richard Henderson
Signed-off-by: Paolo Bonzini
---
 cpus.c | 89 +++++++++++++++++-------------------------------------------------
 1 file changed, 22 insertions(+), 67 deletions(-)

(limited to 'cpus.c')

diff --git a/cpus.c b/cpus.c
index e407910..4f3374e 100644
--- a/cpus.c
+++ b/cpus.c
@@ -661,19 +661,6 @@ static void cpu_handle_guest_debug(CPUState *cpu)
     cpu->stopped = true;
 }
 
-static void cpu_signal(int sig)
-{
-    CPUState *cpu;
-    /* Ensure whatever caused the exit has reached the CPU threads before
-     * writing exit_request.
-     */
-    atomic_mb_set(&exit_request, 1);
-    cpu = atomic_mb_read(&tcg_current_cpu);
-    if (cpu) {
-        cpu_exit(cpu);
-    }
-}
-
 #ifdef CONFIG_LINUX
 static void sigbus_reraise(void)
 {
@@ -786,29 +773,11 @@ static void qemu_kvm_init_cpu_signals(CPUState *cpu)
     }
 }
 
-static void qemu_tcg_init_cpu_signals(void)
-{
-    sigset_t set;
-    struct sigaction sigact;
-
-    memset(&sigact, 0, sizeof(sigact));
-    sigact.sa_handler = cpu_signal;
-    sigaction(SIG_IPI, &sigact, NULL);
-
-    sigemptyset(&set);
-    sigaddset(&set, SIG_IPI);
-    pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-}
-
 #else /* _WIN32 */
 static void qemu_kvm_init_cpu_signals(CPUState *cpu)
 {
     abort();
 }
-
-static void qemu_tcg_init_cpu_signals(void)
-{
-}
 #endif /* _WIN32 */
 
 static QemuMutex qemu_global_mutex;
@@ -1046,7 +1015,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     rcu_register_thread();
 
     qemu_mutex_lock_iothread();
-    qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
 
     CPU_FOREACH(cpu) {
@@ -1090,60 +1058,47 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
 #ifndef _WIN32
     int err;
 
-    if (!tcg_enabled()) {
-        if (cpu->thread_kicked) {
-            return;
-        }
-        cpu->thread_kicked = true;
+    if (cpu->thread_kicked) {
+        return;
     }
+    cpu->thread_kicked = true;
     err = pthread_kill(cpu->thread->thread, SIG_IPI);
     if (err) {
         fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
         exit(1);
     }
 #else /* _WIN32 */
-    if (!qemu_cpu_is_self(cpu)) {
-        CONTEXT tcgContext;
-
-        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
-            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
-                    GetLastError());
-            exit(1);
-        }
-
-        /* On multi-core systems, we are not sure that the thread is actually
-         * suspended until we can get the context.
-         */
-        tcgContext.ContextFlags = CONTEXT_CONTROL;
-        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
-            continue;
-        }
-
-        cpu_signal(0);
+    abort();
+#endif
+}
 
-        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
-            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
-                    GetLastError());
-            exit(1);
-        }
+static void qemu_cpu_kick_no_halt(void)
+{
+    CPUState *cpu;
+    /* Ensure whatever caused the exit has reached the CPU threads before
+     * writing exit_request.
+     */
+    atomic_mb_set(&exit_request, 1);
+    cpu = atomic_mb_read(&tcg_current_cpu);
+    if (cpu) {
+        cpu_exit(cpu);
     }
-#endif
 }
 
 void qemu_cpu_kick(CPUState *cpu)
 {
     qemu_cond_broadcast(cpu->halt_cond);
-    qemu_cpu_kick_thread(cpu);
+    if (tcg_enabled()) {
+        qemu_cpu_kick_no_halt();
+    } else {
+        qemu_cpu_kick_thread(cpu);
+    }
 }
 
 void qemu_cpu_kick_self(void)
 {
-#ifndef _WIN32
     assert(current_cpu);
     qemu_cpu_kick_thread(current_cpu);
-#else
-    abort();
-#endif
 }
 
 bool qemu_cpu_is_self(CPUState *cpu)
@@ -1175,7 +1130,7 @@ void qemu_mutex_lock_iothread(void)
         atomic_dec(&iothread_requesting_mutex);
     } else {
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
-            qemu_cpu_kick_thread(first_cpu);
+            qemu_cpu_kick_no_halt();
             qemu_mutex_lock(&qemu_global_mutex);
         }
         atomic_dec(&iothread_requesting_mutex);
-- 
cgit v1.1


From 376692b9dc6f02303ee07a4146d08d8727d79c0c Mon Sep 17 00:00:00 2001
From: Paolo Bonzini
Date: Fri, 10 Jul 2015 12:32:32 +0200
Subject: cpus: protect work list with work_mutex

Protect the list of queued work items with something other than the
BQL, in preparation for running the work items outside it.

Reviewed-by: Peter Maydell
Signed-off-by: KONRAD Frederic
Signed-off-by: Paolo Bonzini
---
 cpus.c | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

(limited to 'cpus.c')

diff --git a/cpus.c b/cpus.c
index 4f3374e..ea3ffdb 100644
--- a/cpus.c
+++ b/cpus.c
@@ -819,6 +819,8 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi.func = func;
     wi.data = data;
     wi.free = false;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = &wi;
     } else {
@@ -827,9 +829,10 @@
     cpu->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
-    while (!wi.done) {
+    while (!atomic_mb_read(&wi.done)) {
         CPUState *self_cpu = current_cpu;
 
         qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
@@ -850,6 +853,8 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi->func = func;
     wi->data = data;
     wi->free = true;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = wi;
     } else {
@@ -858,6 +863,7 @@
     cpu->queued_work_last = wi;
     wi->next = NULL;
     wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
 }
@@ -870,15 +876,23 @@ static void flush_queued_work(CPUState *cpu)
         return;
     }
 
-    while ((wi = cpu->queued_work_first)) {
+    qemu_mutex_lock(&cpu->work_mutex);
+    while (cpu->queued_work_first != NULL) {
+        wi = cpu->queued_work_first;
         cpu->queued_work_first = wi->next;
+        if (!cpu->queued_work_first) {
+            cpu->queued_work_last = NULL;
+        }
+        qemu_mutex_unlock(&cpu->work_mutex);
         wi->func(wi->data);
-        wi->done = true;
+        qemu_mutex_lock(&cpu->work_mutex);
         if (wi->free) {
             g_free(wi);
+        } else {
+            atomic_mb_set(&wi->done, true);
         }
     }
-    cpu->queued_work_last = NULL;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cond_broadcast(&qemu_work_cond);
 }
-- 
cgit v1.1


From d5f8d61390de8f2acc0da93f184e421a709cb503 Mon Sep 17 00:00:00 2001
From: KONRAD Frederic
Date: Mon, 10 Aug 2015 17:27:06 +0200
Subject: cpus: remove tcg_halt_cond and tcg_cpu_thread globals

This hides the tcg_halt_cond and tcg_cpu_thread global variables inside
qemu_tcg_init_vcpu.  Multi-threaded TCG will need one QemuCond and one
QemuThread per virtual cpu, so it's preferable to use cpu->halt_cond
and cpu->thread.

Signed-off-by: KONRAD Frederic
Message-Id: <1439220437-23957-9-git-send-email-fred.konrad@greensocs.com>
Signed-off-by: Paolo Bonzini
---
 cpus.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

(limited to 'cpus.c')

diff --git a/cpus.c b/cpus.c
index ea3ffdb..dddd056 100644
--- a/cpus.c
+++ b/cpus.c
@@ -786,9 +786,6 @@ static unsigned iothread_requesting_mutex;
 
 static QemuThread io_thread;
 
-static QemuThread *tcg_cpu_thread;
-static QemuCond *tcg_halt_cond;
-
 /* cpu creation */
 static QemuCond qemu_cpu_cond;
 /* system init */
@@ -907,15 +904,13 @@ static void qemu_wait_io_event_common(CPUState *cpu)
     cpu->thread_kicked = false;
 }
 
-static void qemu_tcg_wait_io_event(void)
+static void qemu_tcg_wait_io_event(CPUState *cpu)
 {
-    CPUState *cpu;
-
     while (all_cpu_threads_idle()) {
        /* Start accounting real time to the virtual clock if the CPUs
           are idle.  */
        qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
-        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
+        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
     }
 
     while (iothread_requesting_mutex) {
@@ -1040,7 +1035,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     /* wait for initial kick-off after machine start */
     while (first_cpu->stopped) {
-        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
+        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
 
         /* process any pending work */
         CPU_FOREACH(cpu) {
@@ -1061,7 +1056,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
                 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
             }
         }
-        qemu_tcg_wait_io_event();
+        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
     }
 
     return NULL;
@@ -1224,6 +1219,8 @@ void resume_all_vcpus(void)
 static void qemu_tcg_init_vcpu(CPUState *cpu)
 {
     char thread_name[VCPU_THREAD_NAME_SIZE];
+    static QemuCond *tcg_halt_cond;
+    static QemuThread *tcg_cpu_thread;
 
     tcg_cpu_address_space_init(cpu, cpu->as);
-- 
cgit v1.1
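
The three TCG patches above (tcg_current_cpu, the exit_request barriers, and
signal-free qemu_cpu_kick) converge on one protocol: the kicker publishes
exit_request before reading which CPU is currently executing, while the vCPU
thread publishes itself before checking exit_request, so a kick can never be
lost.  The standalone sketch below is illustrative only and is not QEMU code:
it uses C11 <stdatomic.h> with the default sequentially consistent ordering
where QEMU uses its atomic_mb_set()/atomic_mb_read() macros, a plain per-CPU
flag store where QEMU calls cpu_exit(), and invented names (SketchCPU,
kick_no_halt, exec_iteration).

/*
 * Illustrative sketch of the signal-free kick protocol.  Not QEMU code:
 * C11 seq_cst atomics stand in for atomic_mb_set()/atomic_mb_read(),
 * and a per-CPU flag store stands in for cpu_exit().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct SketchCPU {
    atomic_bool exit_flag;        /* stands in for cpu->exit_request */
    const char *name;
} SketchCPU;

static atomic_bool exit_request;              /* "stop executing TBs" flag */
static _Atomic(SketchCPU *) tcg_current_cpu;  /* CPU currently executing */

/* I/O-thread side, like qemu_cpu_kick_no_halt(): publish the request
 * first, then look at which CPU (if any) is executing right now. */
static void kick_no_halt(void)
{
    SketchCPU *cpu;

    atomic_store(&exit_request, true);
    cpu = atomic_load(&tcg_current_cpu);
    if (cpu) {
        atomic_store(&cpu->exit_flag, true);  /* cpu_exit() stand-in */
    }
}

/* vCPU side: advertise itself before executing, check the flags after.
 * With full barriers on both sides, either the kicker sees the CPU
 * pointer or the CPU sees exit_request -- a kick cannot be lost. */
static void exec_iteration(SketchCPU *cpu)
{
    atomic_store(&tcg_current_cpu, cpu);
    /* ... execute one translation block here ... */
    if (atomic_load(&exit_request) || atomic_load(&cpu->exit_flag)) {
        printf("%s: leaving the execution loop\n", cpu->name);
        atomic_store(&cpu->exit_flag, false);
        atomic_store(&exit_request, false);
    }
    atomic_store(&tcg_current_cpu, NULL);
}

int main(void)
{
    SketchCPU cpu0 = { .name = "cpu0" };

    kick_no_halt();          /* what the I/O thread would do */
    exec_iteration(&cpu0);   /* what the vCPU thread would do */
    return 0;
}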
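
The work_mutex patch follows a small-lock-plus-big-lock arrangement: the
per-CPU work list is protected by work_mutex alone, each item runs with
work_mutex dropped, and the caller still waits under the big lock until the
item is marked done.  The sketch below is again illustrative rather than QEMU
code: plain pthreads stand in for QemuMutex/QemuCond, completion is signalled
per item under the big lock instead of QEMU's atomic_mb_set() on wi->done
followed by a single broadcast, the async/free path is omitted, and the names
(WorkQueue, run_on_cpu_sketch, flush_queued_work_sketch) are invented.

/*
 * Illustrative sketch of the run_on_cpu()/flush_queued_work() locking
 * scheme after the work_mutex patch.  Not QEMU code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct WorkItem {
    void (*func)(void *data);
    void *data;
    bool done;
    struct WorkItem *next;
} WorkItem;

typedef struct WorkQueue {
    pthread_mutex_t work_mutex;   /* protects first/last only */
    pthread_mutex_t big_lock;     /* stands in for the BQL */
    pthread_cond_t work_cond;     /* stands in for qemu_work_cond */
    WorkItem *first;
    WorkItem *last;
} WorkQueue;

/* Caller side: queue the item under work_mutex, then wait for done. */
void run_on_cpu_sketch(WorkQueue *q, void (*func)(void *), void *data)
{
    WorkItem wi = { .func = func, .data = data, .done = false, .next = NULL };

    pthread_mutex_lock(&q->work_mutex);
    if (q->first == NULL) {
        q->first = &wi;
    } else {
        q->last->next = &wi;
    }
    q->last = &wi;
    pthread_mutex_unlock(&q->work_mutex);

    /* ... kick the worker thread here (omitted) ... */

    pthread_mutex_lock(&q->big_lock);
    while (!wi.done) {
        pthread_cond_wait(&q->work_cond, &q->big_lock);
    }
    pthread_mutex_unlock(&q->big_lock);
}

/* Worker side: pop under work_mutex, run each item with it dropped. */
void flush_queued_work_sketch(WorkQueue *q)
{
    pthread_mutex_lock(&q->work_mutex);
    while (q->first != NULL) {
        WorkItem *wi = q->first;
        q->first = wi->next;
        if (q->first == NULL) {
            q->last = NULL;
        }
        pthread_mutex_unlock(&q->work_mutex);

        wi->func(wi->data);            /* run outside work_mutex */

        pthread_mutex_lock(&q->big_lock);
        wi->done = true;               /* read by the waiter under big_lock */
        pthread_cond_broadcast(&q->work_cond);
        pthread_mutex_unlock(&q->big_lock);

        pthread_mutex_lock(&q->work_mutex);
    }
    pthread_mutex_unlock(&q->work_mutex);
}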