| author | Emilio G. Cota <cota@braap.org> | 2017-07-14 17:56:30 -0400 |
|---|---|---|
| committer | Richard Henderson <richard.henderson@linaro.org> | 2017-10-24 13:53:42 -0700 |
| commit | ac03ee5331612e44beb393df2b578c951d27dc0d | |
| tree | 6164d1ecfb7d0451dcce30bc0aeb6c822c735464 /accel | |
| parent | e82d5a2460b0e176128027651ff9b104e4bdf5cc | |
cpu-exec: lookup/generate TB outside exclusive region during step_atomic
Now that all code generation has been converted to check CF_PARALLEL, we can
generate !CF_PARALLEL code without having yet set !parallel_cpus --
and therefore without having to be in the exclusive region during
cpu_exec_step_atomic.
While at it, merge cpu_exec_step into cpu_exec_step_atomic.
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
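
The core of the change is the ordering: translation-block lookup and code generation now happen before start_exclusive(), and only the execution of the block, plus the brief flip of parallel_cpus, sits inside the exclusive window. As a rough illustration, here is a minimal, self-contained C sketch of that pattern; all names (parallel_mode, gen_block, step_atomic, F_PARALLEL) are invented for the example and are not QEMU's actual API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Global mode flag; stand-in for QEMU's parallel_cpus. */
static bool parallel_mode = true;

/* Stand-in for a TranslationBlock: remembers the flags it was built for. */
struct block {
    unsigned cflags;
};

#define F_PARALLEL 1u

/* Code generation consults only the flags passed in (CF_PARALLEL in the
 * real code), never the global mode, so it is safe to run outside the
 * exclusive region. */
static struct block gen_block(unsigned cflags)
{
    struct block b = { .cflags = cflags };
    return b;
}

/* Stand-ins for QEMU's start_exclusive()/end_exclusive(), which park all
 * other vCPU threads for the duration. */
static void start_exclusive(void) { }
static void end_exclusive(void) { }

static void step_atomic(void)
{
    /* 1. Lookup/generation outside the exclusive region: the requested
     *    flags fully determine the generated code. */
    struct block b = gen_block(0 /* !F_PARALLEL */);

    /* 2. Only the actual execution needs the exclusive window, during
     *    which the global flag is briefly flipped. */
    start_exclusive();
    parallel_mode = false;
    printf("executing block, cflags=%u, parallel_mode=%d\n",
           b.cflags, parallel_mode);
    parallel_mode = true;
    end_exclusive();
}

int main(void)
{
    step_atomic();
    return 0;
}
```

The point of the sketch is that gen_block() depends only on its explicit cflags argument, mirroring how CF_PARALLEL in cflags replaces reads of the global parallel_cpus during code generation.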
Diffstat (limited to 'accel')
-rw-r--r-- | accel/tcg/cpu-exec.c | 30 |
1 file changed, 14 insertions, 16 deletions
```diff
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 1c64977..849b54d 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -223,30 +223,40 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
 }
 #endif
 
-static void cpu_exec_step(CPUState *cpu)
+void cpu_exec_step_atomic(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
     uint32_t cflags = 1 | CF_IGNORE_ICOUNT;
+    uint32_t cf_mask = cflags & CF_HASH_MASK;
 
     if (sigsetjmp(cpu->jmp_env, 0) == 0) {
-        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags,
-                                  cflags & CF_HASH_MASK);
+        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
         if (tb == NULL) {
             mmap_lock();
             tb_lock();
-            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+            tb = tb_htable_lookup(cpu, pc, cs_base, flags, cf_mask);
+            if (likely(tb == NULL)) {
+                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
+            }
             tb_unlock();
             mmap_unlock();
         }
 
+        start_exclusive();
+
+        /* Since we got here, we know that parallel_cpus must be true. */
+        parallel_cpus = false;
         cc->cpu_exec_enter(cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
         cpu_tb_exec(cpu, tb);
         cc->cpu_exec_exit(cpu);
+        parallel_cpus = true;
+
+        end_exclusive();
     } else {
         /* We may have exited due to another problem here, so we need
          * to reset any tb_locks we may have taken but didn't release.
@@ -260,18 +270,6 @@ static void cpu_exec_step(CPUState *cpu)
     }
 }
 
-void cpu_exec_step_atomic(CPUState *cpu)
-{
-    start_exclusive();
-
-    /* Since we got here, we know that parallel_cpus must be true. */
-    parallel_cpus = false;
-    cpu_exec_step(cpu);
-    parallel_cpus = true;
-
-    end_exclusive();
-}
-
 struct tb_desc {
     target_ulong pc;
     target_ulong cs_base;
```
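
Because the lookup/generation now runs outside the exclusive region, two vCPUs can race to the same miss; the patch therefore re-probes the table with tb_htable_lookup() after taking tb_lock, and only calls tb_gen_code() if the block is still absent. Below is a minimal sketch of that double-checked pattern using a toy table and a pthread mutex; the names are illustrative only (QEMU's real TB table is the lock-free qht, and its fast-path probe takes no lock at all):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 64u

struct block {
    unsigned key;
};

/* Toy shared table standing in for QEMU's TB hash table. */
static struct block *table[TABLE_SIZE];
static pthread_mutex_t tb_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Probe; plays the role of tb_lookup__cpu_state/tb_htable_lookup. */
static struct block *table_lookup(unsigned key)
{
    return table[key % TABLE_SIZE];
}

/* Slow path; caller must hold tb_mutex (the real code's tb_lock). */
static struct block *generate_and_insert(unsigned key)
{
    struct block *b = malloc(sizeof(*b));
    b->key = key;
    table[key % TABLE_SIZE] = b;
    return b;
}

static struct block *lookup_or_generate(unsigned key)
{
    struct block *b = table_lookup(key);    /* fast path, no lock */
    if (b == NULL) {
        pthread_mutex_lock(&tb_mutex);
        b = table_lookup(key);              /* re-check under the lock */
        if (b == NULL) {
            b = generate_and_insert(key);   /* only one thread generates */
        }
        pthread_mutex_unlock(&tb_mutex);
    }
    return b;
}

int main(void)
{
    struct block *b = lookup_or_generate(42);
    printf("block for key %u at %p\n", b->key, (void *)b);
    return 0;
}
```

The re-check under the lock is what keeps the out-of-lock fast path correct: losing the race costs one extra lookup, while skipping the re-check would cost a redundant (and wasteful) code generation.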