Diffstat (limited to 'accel/tcg/cpu-exec.c')
-rw-r--r--  accel/tcg/cpu-exec.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 48272c7..c01f59c 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -238,8 +238,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
     uint32_t flags;
     uint32_t cflags = 1;
     uint32_t cf_mask = cflags & CF_HASH_MASK;
-    /* volatile because we modify it between setjmp and longjmp */
-    volatile bool in_exclusive_region = false;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
         tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
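
Context for this removal: a local that is written between sigsetjmp() and the
matching siglongjmp() has an indeterminate value after the jump unless it is
declared volatile (C11 7.13.2.1p3), which is why the flag carried that
qualifier. The patch sidesteps the issue by deriving the state from the
CPUState instead of a stack local. A minimal standalone sketch of the old
pattern (illustrative only, not QEMU code):

    #include <setjmp.h>
    #include <stdbool.h>
    #include <stdio.h>

    static sigjmp_buf env;

    int main(void)
    {
        /* Written after sigsetjmp() and read after siglongjmp():
         * without volatile the value here would be indeterminate. */
        volatile bool entered = false;

        if (sigsetjmp(env, 0) == 0) {
            entered = true;       /* the in_exclusive_region analogue */
            siglongjmp(env, 1);   /* models the exception/abort path */
        }
        printf("entered = %d\n", entered);  /* prints 1 */
        return 0;
    }
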
@@ -253,7 +251,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
 
         /* Since we got here, we know that parallel_cpus must be true. */
         parallel_cpus = false;
-        in_exclusive_region = true;
         cc->cpu_exec_enter(cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
@@ -271,9 +268,10 @@ void cpu_exec_step_atomic(CPUState *cpu)
             qemu_mutex_unlock_iothread();
         }
         assert_no_pages_locked();
+        qemu_plugin_disable_mem_helpers(cpu);
     }
 
-    if (in_exclusive_region) {
+    if (cpu_in_exclusive_context(cpu)) {
         /* We might longjump out of either the codegen or the
          * execution, so must make sure we only end the exclusive
          * region if we started it.
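
With the volatile local gone, the exclusive-region state has to live
somewhere that survives the siglongjmp. In QEMU that place is the CPUState
itself: start_exclusive() marks the current vCPU, end_exclusive() clears the
mark, and cpu_in_exclusive_context() reads it back. A simplified sketch of
that shape (field and function bodies abridged from cpus-common.c and
include/hw/core/cpu.h; all synchronization elided):

    #include <stdbool.h>

    typedef struct CPUState {
        bool in_exclusive_context;  /* true only while this vCPU owns
                                       the exclusive region */
        /* ... */
    } CPUState;

    static __thread CPUState *current_cpu;

    static inline bool cpu_in_exclusive_context(const CPUState *cpu)
    {
        return cpu->in_exclusive_context;
    }

    static void start_exclusive(void)
    {
        /* real code: first wait for every other vCPU to halt */
        current_cpu->in_exclusive_context = true;
    }

    static void end_exclusive(void)
    {
        current_cpu->in_exclusive_context = false;
        /* real code: wake the vCPUs parked in cpu_exec_start() */
    }

Because the flag sits in the CPUState rather than on the stack, the guard
after the sigsetjmp cleanup no longer needs any volatile bookkeeping.
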
@@ -704,6 +702,8 @@ int cpu_exec(CPUState *cpu)
         if (qemu_mutex_iothread_locked()) {
             qemu_mutex_unlock_iothread();
         }
+        qemu_plugin_disable_mem_helpers(cpu);
+
         assert_no_pages_locked();
     }
 
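
The two qemu_plugin_disable_mem_helpers() calls sit on the longjmp recovery
paths: the plugin memory-callback pointer on the vCPU is only meaningful
while the TB that installed it runs to completion, and an exception or abort
skips the epilogue that would normally clear it. A sketch of what the helper
amounts to, assuming a plugin_mem_cbs-style field as in plugins/core.c
(simplified; the real type is a glib GArray of callback records):

    typedef struct GArray GArray;   /* opaque here; glib type in QEMU */

    typedef struct CPUState {
        GArray *plugin_mem_cbs;     /* valid only while the installing
                                       TB is executing */
        /* ... */
    } CPUState;

    void qemu_plugin_disable_mem_helpers(CPUState *cpu)
    {
        /* Reset so the next TB cannot run stale memory callbacks. */
        cpu->plugin_mem_cbs = NULL;
    }

Clearing the pointer on every unwind path keeps a later memory access from
dispatching through callbacks that belonged to the aborted block.
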