about summary refs log tree commit diff
path: root/accel
diff options
context:
space:
mode:
authorAlex Bennée <alex.bennee@linaro.org>2021-02-13 13:03:19 +0000
committerAlex Bennée <alex.bennee@linaro.org>2021-02-18 08:19:23 +0000
commita11bbb6a23a1bb5a4bf172f5b2739785e8f06c79 (patch)
treebdf49eb065f5bcc6379e5e7bda1b83f6fd245edb /accel
parentbc662a33514ac862efefc73d6caa4e71581ccdae (diff)
downloadqemu-a11bbb6a23a1bb5a4bf172f5b2739785e8f06c79.zip
qemu-a11bbb6a23a1bb5a4bf172f5b2739785e8f06c79.tar.gz
qemu-a11bbb6a23a1bb5a4bf172f5b2739785e8f06c79.tar.bz2
accel/tcg: cache single instruction TB on pending replay exception
Again there is no reason to jump through the nocache hoops to execute a single instruction block. We do have to add an additional wrinkle to the cpu_handle_interrupt case to ensure we let through a TB where we have specifically disabled icount for the block. As the last user of cpu_exec_nocache we can now remove the function. Further clean-up will follow in subsequent patches. Signed-off-by: Alex Bennée <alex.bennee@linaro.org> Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20210213130325.14781-18-alex.bennee@linaro.org>
Diffstat (limited to 'accel')
-rw-r--r--accel/tcg/cpu-exec.c44
1 file changed, 4 insertions(+), 40 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index d24c1bd..16e4fe3 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -224,40 +224,6 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
return last_tb;
}
-#ifndef CONFIG_USER_ONLY
-/* Execute the code without caching the generated code. An interpreter
- could be used if available. */
-static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
- TranslationBlock *orig_tb, bool ignore_icount)
-{
- TranslationBlock *tb;
- uint32_t cflags = curr_cflags() | CF_NOCACHE;
- int tb_exit;
-
- if (ignore_icount) {
- cflags &= ~CF_USE_ICOUNT;
- }
-
- /* Should never happen.
- We only end up here when an existing TB is too long. */
- cflags |= MIN(max_cycles, CF_COUNT_MASK);
-
- mmap_lock();
- tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
- orig_tb->flags, cflags);
- tb->orig_tb = orig_tb;
- mmap_unlock();
-
- /* execute the generated code */
- trace_exec_tb_nocache(tb, tb->pc);
- cpu_tb_exec(cpu, tb, &tb_exit);
-
- mmap_lock();
- tb_phys_invalidate(tb, -1);
- mmap_unlock();
- tcg_tb_remove(tb);
-}
-#endif
static void cpu_exec_enter(CPUState *cpu)
{
@@ -524,15 +490,12 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
#ifndef CONFIG_USER_ONLY
if (replay_has_exception()
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
- /* try to cause an exception pending in the log */
- cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
+ /* Execute just one insn to trigger exception pending in the log */
+ cpu->cflags_next_tb = (curr_cflags() & ~CF_USE_ICOUNT) | 1;
}
#endif
- if (cpu->exception_index < 0) {
- return false;
- }
+ return false;
}
-
if (cpu->exception_index >= EXCP_INTERRUPT) {
/* exit request from the cpu execution loop */
*ret = cpu->exception_index;
@@ -688,6 +651,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
/* Finally, check if we need to exit to the main loop. */
if (unlikely(qatomic_read(&cpu->exit_request))
|| (icount_enabled()
+ && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
&& cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
qatomic_set(&cpu->exit_request, 0);
if (cpu->exception_index == -1) {