author     Paolo Bonzini <pbonzini@redhat.com>  2023-03-03 13:12:50 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2023-05-08 11:10:49 +0200
commit     20f46806b3858b92e9d1b5cf586558d62bd5a913 (patch)
tree       757482ed7bbfd33e45d60bda5952127fed0ccc04 /accel/tcg
parent     8f593ba9c5c96b1790cc6aceb95b5b83bbac92cd (diff)
tb-maint: do not use mb_read/mb_set
The load side can use a relaxed load, which will surely happen before
the work item is run by async_safe_run_on_cpu() or before double-checking
under mmap_lock.  The store side can use an atomic RMW operation.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
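To make the reasoning concrete, here is a minimal self-contained sketch
of the two sides in C11 stdatomic, used as a stand-in for QEMU's
qatomic_* macros; the function names and the main() driver are
illustrative, not QEMU code:

    /*
     * Sketch of the pattern (not QEMU code): C11 stdatomic stands in
     * for QEMU's qatomic_* macros, and all names are illustrative.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint tb_flush_count;

    /* Store side: a single atomic RMW replaces "read, add 1, publish
     * with qatomic_mb_set"; the increment is atomic by construction,
     * so no separate barrier is needed. */
    static void store_side(void)
    {
        atomic_fetch_add(&tb_flush_count, 1);       /* ~ qatomic_inc() */
    }

    /* Load side: a relaxed load suffices because the value is only a
     * snapshot that is re-validated later, either by the ordering of
     * async_safe_run_on_cpu() or by a double check under mmap_lock. */
    static unsigned load_side(void)
    {
        return atomic_load_explicit(&tb_flush_count,
                                    memory_order_relaxed);  /* ~ qatomic_read() */
    }

    int main(void)
    {
        unsigned snap = load_side();
        store_side();
        printf("snapshot %u, now %u\n", snap, load_side());
        return 0;
    }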
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/tb-maint.c  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index cb1f806..0dd173f 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -746,7 +746,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     tcg_region_reset_all();
 
     /* XXX: flush processor icache at this point if cache flush is expensive */
-    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
+    qatomic_inc(&tb_ctx.tb_flush_count);
 
 done:
     mmap_unlock();
@@ -758,7 +758,7 @@ done:
 void tb_flush(CPUState *cpu)
 {
     if (tcg_enabled()) {
-        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
+        unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
 
         if (cpu_in_exclusive_context(cpu)) {
             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
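For context on why the relaxed read above is safe: per the commit
message, the snapshot is double-checked under mmap_lock on the flush
path, so a stale read at worst makes the request look already
satisfied. A generic sketch of that double-check pattern, with a
pthread mutex as a hypothetical stand-in for mmap_lock and every name
invented for illustration:

    /*
     * Generic double-check sketch (not QEMU code): the mutex stands in
     * for mmap_lock, and all names are hypothetical placeholders.
     */
    #include <pthread.h>
    #include <stdatomic.h>

    static atomic_uint flush_count;
    static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

    static void flush_if_still_needed(unsigned snapshot)
    {
        pthread_mutex_lock(&flush_lock);
        /* Re-check under the lock: if another thread already flushed,
         * the counter has moved past our snapshot and the work can be
         * skipped, so a stale relaxed read never flushes twice. */
        if (atomic_load_explicit(&flush_count,
                                 memory_order_relaxed) == snapshot) {
            /* ... perform the actual flush ... */
            atomic_fetch_add(&flush_count, 1);    /* store-side RMW */
        }
        pthread_mutex_unlock(&flush_lock);
    }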