author     Emilio G. Cota <cota@braap.org>       2016-05-24 16:06:13 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2016-05-29 09:11:11 +0200
commit     c983895258a771f8a5e4a53950bfb7fd2216651c (patch)
tree       b383a83403d84c2d8247143783f3f700772dcaf8 /include
parent     56ebe02203f033a8399f7f6ea6972225ed87101c (diff)
atomics: emit an smp_read_barrier_depends() barrier only for Alpha and Thread Sanitizer
For correctness, smp_read_barrier_depends() is only required to emit a barrier on Alpha hosts. However, we are currently emitting a consume fence unconditionally, and most compilers currently treat consume and acquire fences as equivalent.

Fix it by keeping the consume fence if we're compiling with Thread Sanitizer, since this might help prevent false warnings. Otherwise, only emit the barrier for Alpha hosts. Note that we still guarantee that smp_read_barrier_depends() is a compiler barrier.

Signed-off-by: Emilio G. Cota <cota@braap.org>
Message-Id: <1464120374-8950-3-git-send-email-cota@braap.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
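For context, smp_read_barrier_depends() belongs on the reader side of a pointer-publication pattern: after loading a shared pointer, loads made through that pointer must observe the data the writer initialized before publishing it. On every host except Alpha the data dependency alone orders those loads, so a compiler barrier suffices. The following is a minimal sketch of that pattern, not code from this patch; the writer()/reader() functions, the data struct and global_ptr are hypothetical, while smp_wmb(), smp_read_barrier_depends(), atomic_read() and atomic_set() are the macros provided by include/qemu/atomic.h.

#include "qemu/atomic.h"

struct data { int payload; };
static struct data *global_ptr;          /* shared between threads */

static void writer(struct data *d)
{
    d->payload = 42;                     /* initialize the data first ...      */
    smp_wmb();                           /* ... order the stores ...           */
    atomic_set(&global_ptr, d);          /* ... then publish the pointer       */
}

static int reader(void)
{
    struct data *d = atomic_read(&global_ptr);
    /* Order the dependent load of d->payload after the load of d.
     * Only Alpha needs a real memory barrier here; with Thread
     * Sanitizer a consume fence is kept to avoid false warnings,
     * and everywhere else this is just a compiler barrier. */
    smp_read_barrier_depends();
    return d ? d->payload : 0;
}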
Diffstat (limited to 'include')
-rw-r--r--  include/qemu/atomic.h  11
1 file changed, 11 insertions, 0 deletions
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index 5bc4d6c..96db6e9 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -36,7 +36,18 @@
#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
+/* Most compilers currently treat consume and acquire the same, but really
+ * no processors except Alpha need a barrier here. Leave it in if
+ * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
+ */
+#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
+#elif defined(__alpha__)
+#define smp_read_barrier_depends() asm volatile("mb":::"memory")
+#else
+#define smp_read_barrier_depends() barrier()
+#endif
+
/* Weak atomic operations prevent the compiler moving other
* loads/stores past the atomic operation load/store. However there is