author     Richard Henderson <richard.henderson@linaro.org>    2022-11-05 11:34:58 +0000
committer  Richard Henderson <richard.henderson@linaro.org>    2023-05-16 15:21:39 -0700
commit     e61f1efeb730fd64441131ea721086065904ff67 (patch)
tree       c1c902b988700c06782fb573239a6cff0c0d3646 /accel
parent     35c653c4029794f67a523191941104fe12f2b22d (diff)
meson: Detect atomic128 support with optimization
There is an edge condition prior to GCC 13 for which optimization is required to generate 16-byte atomic sequences.  Detect this.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
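[Editor's note: the meson detection itself is outside the accel/-limited diff shown below. As a rough, hypothetical sketch of the kind of probe a build system could use for this, the snippet that follows is illustrative only: link a small program exercising 16-byte __atomic built-ins once with default flags and once with -O1, and define CONFIG_ATOMIC128_OPT when only the optimized build succeeds without libatomic calls.]

/* Hypothetical probe sketch, not taken from the patch.  On hosts where
 * __alignof(__int128) < 16 (e.g. s390x), GCC before 13 only inlines the
 * 16-byte atomics when the __builtin_assume_aligned promise is seen by
 * the optimizer, hence the second probe at -O1. */
int main(void)
{
    __uint128_t x = 0, y = 0;
    __uint128_t *p = __builtin_assume_aligned(&x, 16);

    y = __atomic_load_n(p, __ATOMIC_RELAXED);
    __atomic_store_n(p, y, __ATOMIC_RELAXED);
    __atomic_compare_exchange_n(p, &y, x, 0,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return 0;
}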
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/ldst_atomicity.c.inc  | 29
1 file changed, 25 insertions(+), 4 deletions(-)
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index ce73b32..ba5db7c 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -16,6 +16,23 @@
#endif
#define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8)
+/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x. We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned, however prior to
+ * GCC 13 that was only reliable with optimization enabled. See
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
#if defined(CONFIG_ATOMIC128)
# define HAVE_al16_fast true
#else
@@ -152,7 +169,8 @@ static inline uint64_t load_atomic8(void *pv)
*
* Atomically load 16 aligned bytes from @pv.
*/
-static inline Int128 load_atomic16(void *pv)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+load_atomic16(void *pv)
{
#ifdef CONFIG_ATOMIC128
__uint128_t *p = __builtin_assume_aligned(pv, 16);
@@ -356,7 +374,8 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
* cross an 16-byte boundary then the access must be 16-byte atomic,
* otherwise the access must be 8-byte atomic.
*/
-static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s)
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
{
#if defined(CONFIG_ATOMIC128)
uintptr_t pi = (uintptr_t)pv;
@@ -692,7 +711,8 @@ static inline void store_atomic8(void *pv, uint64_t val)
*
* Atomically store 16 aligned bytes to @pv.
*/
-static inline void store_atomic16(void *pv, Int128Alias val)
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atomic16(void *pv, Int128Alias val)
{
#if defined(CONFIG_ATOMIC128)
__uint128_t *pu = __builtin_assume_aligned(pv, 16);
@@ -790,7 +810,8 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
*
* Atomically store @val to @p masked by @msk.
*/
-static void store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
+static void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
{
#if defined(CONFIG_ATOMIC128)
__uint128_t *pu, old, new;