author     Alex Bennée <alex.bennee@linaro.org>  2021-11-29 14:09:26 +0000
committer  Alex Bennée <alex.bennee@linaro.org>  2021-11-29 15:12:37 +0000
commit     aff0e204cb1f1c036a496c94c15f5dfafcd9b4b4
tree       3b7ca5c6f32f9635df139d4961c2c2aaa87d9007 /accel
parent     48e14066ac10581db4e69f75eda107cfdafa6022
accel/tcg: suppress IRQ check for special TBs
When we set cpu->cflags_next_tb it is because we want to carefully
control the execution of the next TB. Currently there is a race that
causes the second stage of watchpoint handling to get ignored if an
IRQ is processed before we finish executing the instruction that
triggers the watchpoint. Use the new CF_NOIRQ facility to avoid the
race. We also suppress IRQs when handling precise self modifying code
to avoid unnecessary bouncing.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Cc: Pavel Dovgalyuk <pavel.dovgalyuk@ispras.ru>
Fixes: https://gitlab.com/qemu-project/qemu/-/issues/245
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20211129140932.4115115-3-alex.bennee@linaro.org>
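For illustration, the early-out this patch adds to cpu_handle_interrupt() can be modelled in isolation. The following is a minimal, self-contained sketch of that check; ToyCPUState and the CF_NOIRQ bit value are illustrative stand-ins, not QEMU's actual definitions.

/*
 * Minimal stand-alone model of the early-out this patch adds to
 * cpu_handle_interrupt().  ToyCPUState and the CF_NOIRQ value are
 * illustrative stand-ins; QEMU's real CPUState and cflags differ.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CF_NOIRQ 0x00100000u            /* bit value chosen for the sketch */

typedef struct {
    uint32_t cflags_next_tb;            /* (uint32_t)-1 means "nothing special" */
} ToyCPUState;

/* Mirrors the new check: skip IRQ handling while a special TB is pending */
static bool skip_irq_check(const ToyCPUState *cpu)
{
    return cpu->cflags_next_tb != (uint32_t)-1 &&
           (cpu->cflags_next_tb & CF_NOIRQ);
}

int main(void)
{
    ToyCPUState cpu = { .cflags_next_tb = (uint32_t)-1 };
    printf("normal flow:  skip IRQ check? %d\n", skip_irq_check(&cpu));

    /* e.g. precise SMC forcing a single-insn, IRQ-free TB (as in the patch) */
    cpu.cflags_next_tb = 1 | CF_NOIRQ;
    printf("special TB:   skip IRQ check? %d\n", skip_irq_check(&cpu));
    return 0;
}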
Diffstat (limited to 'accel')
 accel/tcg/cpu-exec.c      | 9 +++++++++
 accel/tcg/translate-all.c | 4 ++--
 2 files changed, 11 insertions(+), 2 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 2d14d02..409ec8c 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -721,6 +721,15 @@ static inline bool need_replay_interrupt(int interrupt_request)
static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
+ /*
+ * If we have requested custom cflags with CF_NOIRQ we should
+ * skip checking here. Any pending interrupts will get picked up
+ * by the next TB we execute under normal cflags.
+ */
+ if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
+ return false;
+ }
+
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
* Ensure zeroing happens before reading cpu->exit_request or
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index bd0bb81..bd71db5 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1738,7 +1738,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
if (current_tb_modified) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags(cpu);
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
@@ -1906,7 +1906,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags(cpu);
+ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
return true;
}
#endif
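The comment added to cpu_handle_interrupt() notes that pending interrupts are picked up by the next TB executed under normal cflags. That works because cflags_next_tb is consumed exactly once: the TB lookup in cpu_exec() takes the special cflags and then resets the field, so CF_NOIRQ only defers interrupts until the forced single-insn TB has run. The fragment below paraphrases that consumer side; it is a simplified sketch, not QEMU's actual cpu_exec() code, and reuses the same illustrative ToyCPUState as the sketch above.

/*
 * Sketch of the consumer side: the special cflags are used for exactly
 * one TB and then cleared, returning the CPU to normal execution.
 */
#include <stdint.h>

typedef struct {
    uint32_t cflags_next_tb;                /* (uint32_t)-1 means "nothing special" */
} ToyCPUState;

uint32_t next_tb_cflags(ToyCPUState *cpu, uint32_t curr_cflags)
{
    uint32_t cflags;

    if (cpu->cflags_next_tb == (uint32_t)-1) {
        cflags = curr_cflags;               /* normal execution */
    } else {
        cflags = cpu->cflags_next_tb;       /* one-shot special TB ... */
        cpu->cflags_next_tb = (uint32_t)-1; /* ... then back to normal */
    }
    return cflags;
}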