tb-maint: do not use mb_read/mb_set
author	Paolo Bonzini <pbonzini@redhat.com>
	Fri, 3 Mar 2023 12:12:50 +0000 (13:12 +0100)
committer	Paolo Bonzini <pbonzini@redhat.com>
	Mon, 8 May 2023 09:10:49 +0000 (11:10 +0200)
The load side can use a relaxed load, which will surely happen before
the work item is run by async_safe_run_on_cpu() or before double-checking
under mmap_lock.  The store side can use an atomic RMW operation.
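
[Note: the following is not part of the commit; it is a minimal sketch of the resulting pattern in standard C11 atomics rather than QEMU's qatomic_* wrappers. The names (flush_count, flush_lock, do_flush_locked, request_flush) are illustrative only. The point is that the writer side uses an atomic read-modify-write (cf. qatomic_inc) while the reader side only needs a relaxed load (cf. qatomic_read), because the value is re-checked under the lock before acting on it.]

#include <stdatomic.h>
#include <pthread.h>

/* Illustrative stand-ins, not QEMU code. */
static atomic_uint flush_count;          /* mirrors tb_ctx.tb_flush_count */
static pthread_mutex_t flush_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer: bump the generation with an atomic RMW (cf. qatomic_inc). */
static void do_flush_locked(void)
{
    /* ... flush work performed while holding flush_lock ... */
    atomic_fetch_add_explicit(&flush_count, 1, memory_order_seq_cst);
}

/* Reader: a relaxed load suffices (cf. qatomic_read); the count is
 * double-checked under the lock, so a stale read only means the flush
 * already happened and we skip the work. */
static void request_flush(void)
{
    unsigned seen = atomic_load_explicit(&flush_count, memory_order_relaxed);

    pthread_mutex_lock(&flush_lock);
    if (atomic_load_explicit(&flush_count, memory_order_relaxed) == seen) {
        /* Nobody flushed since we sampled the counter; do it now. */
        do_flush_locked();
    }
    pthread_mutex_unlock(&flush_lock);
}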

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
accel/tcg/tb-maint.c

index cb1f806f00d4609faca29a57795bea38d17d2156..0dd173fbf048c9a247f08f1d14e4be0dffd0ac70 100644 (file)
@@ -746,7 +746,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
 
     tcg_region_reset_all();
     /* XXX: flush processor icache at this point if cache flush is expensive */
-    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
+    qatomic_inc(&tb_ctx.tb_flush_count);
 
 done:
     mmap_unlock();
@@ -758,7 +758,7 @@ done:
 void tb_flush(CPUState *cpu)
 {
     if (tcg_enabled()) {
-        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
+        unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);
 
         if (cpu_in_exclusive_context(cpu)) {
             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));