author | Richard Henderson <richard.henderson@linaro.org> | 2023-05-19 19:22:25 -0700
---|---|---
committer | Richard Henderson <richard.henderson@linaro.org> | 2023-05-23 18:54:55 -0700
commit | e45fb74ddb75913f81a48112b828ee58bd9f6e2a |
tree | 7f8b9a6dba5bf8531568a6104e061d73c8c3f0b7 /host |
parent | b35b812567f090e41c4b194bb2a752f29e9aedcc |
qemu/atomic128: Add runtime test for FEAT_LSE2
With FEAT_LSE2, 16-byte aligned LDP/STP are single-copy atomic, so loads and stores of int128 are directly supported.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
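
For illustration, a minimal sketch of how a caller might consume the runtime-gated interface; `load_u128` is a hypothetical helper and the include path is assumed, while `HAVE_ATOMIC128_RO`, `atomic16_read_ro`, and `atomic16_read_rw` are the interfaces this patch touches:

```c
#include "qemu/int128.h"
#include "host/atomic128-ldst.h"    /* assumed include path for this header */

/*
 * Hypothetical caller: when the host advertises FEAT_LSE2 at runtime,
 * HAVE_ATOMIC128_RO is true and the read-only LDP path may be used,
 * even on a write-protected mapping.  Otherwise fall back to the
 * ldxp/stxp loop, which requires a writable mapping.  In both cases
 * ptr must be 16-byte aligned.
 */
static inline Int128 load_u128(Int128 *ptr)
{
    if (HAVE_ATOMIC128_RO) {
        return atomic16_read_ro(ptr);
    }
    return atomic16_read_rw(ptr);
}
```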
Diffstat (limited to 'host')
-rw-r--r-- | host/include/aarch64/host/atomic128-ldst.h | 53
1 file changed, 40 insertions(+), 13 deletions(-)
diff --git a/host/include/aarch64/host/atomic128-ldst.h b/host/include/aarch64/host/atomic128-ldst.h
index 4b1360d..a08f62c 100644
--- a/host/include/aarch64/host/atomic128-ldst.h
+++ b/host/include/aarch64/host/atomic128-ldst.h
@@ -11,27 +11,48 @@
 #ifndef AARCH64_ATOMIC128_LDST_H
 #define AARCH64_ATOMIC128_LDST_H
 
+#include "host/cpuinfo.h"
+#include "tcg/debug-assert.h"
+
 /*
  * Through gcc 10, aarch64 has no support for 128-bit atomics.
  * Through clang 16, without -march=armv8.4-a, __atomic_load_16
  * is incorrectly expanded to a read-write operation.
+ *
+ * Anyway, this method allows runtime detection of FEAT_LSE2.
  */
-#define HAVE_ATOMIC128_RO 0
+#define HAVE_ATOMIC128_RO (cpuinfo & CPUINFO_LSE2)
 #define HAVE_ATOMIC128_RW 1
 
-Int128 QEMU_ERROR("unsupported atomic") atomic16_read_ro(const Int128 *ptr);
+static inline Int128 atomic16_read_ro(const Int128 *ptr)
+{
+    uint64_t l, h;
+
+    tcg_debug_assert(HAVE_ATOMIC128_RO);
+    /* With FEAT_LSE2, 16-byte aligned LDP is atomic. */
+    asm("ldp %[l], %[h], %[mem]"
+        : [l] "=r"(l), [h] "=r"(h) : [mem] "m"(*ptr));
+
+    return int128_make128(l, h);
+}
 
 static inline Int128 atomic16_read_rw(Int128 *ptr)
 {
     uint64_t l, h;
     uint32_t tmp;
 
-    /* The load must be paired with the store to guarantee not tearing. */
-    asm("0: ldxp %[l], %[h], %[mem]\n\t"
-        "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
-        "cbnz %w[tmp], 0b"
-        : [mem] "+m"(*ptr), [tmp] "=r"(tmp), [l] "=r"(l), [h] "=r"(h));
+    if (cpuinfo & CPUINFO_LSE2) {
+        /* With FEAT_LSE2, 16-byte aligned LDP is atomic. */
+        asm("ldp %[l], %[h], %[mem]"
+            : [l] "=r"(l), [h] "=r"(h) : [mem] "m"(*ptr));
+    } else {
+        /* The load must be paired with the store to guarantee not tearing. */
+        asm("0: ldxp %[l], %[h], %[mem]\n\t"
+            "stxp %w[tmp], %[l], %[h], %[mem]\n\t"
+            "cbnz %w[tmp], 0b"
+            : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), [l] "=&r"(l), [h] "=&r"(h));
+    }
 
     return int128_make128(l, h);
 }
@@ -41,12 +62,18 @@ static inline void atomic16_set(Int128 *ptr, Int128 val)
     uint64_t l = int128_getlo(val), h = int128_gethi(val);
     uint64_t t1, t2;
 
-    /* Load into temporaries to acquire the exclusive access lock. */
-    asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
-        "stxp %w[t1], %[l], %[h], %[mem]\n\t"
-        "cbnz %w[t1], 0b"
-        : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
-        : [l] "r"(l), [h] "r"(h));
+    if (cpuinfo & CPUINFO_LSE2) {
+        /* With FEAT_LSE2, 16-byte aligned STP is atomic. */
+        asm("stp %[l], %[h], %[mem]"
+            : [mem] "=m"(*ptr) : [l] "r"(l), [h] "r"(h));
+    } else {
+        /* Load into temporaries to acquire the exclusive access lock. */
+        asm("0: ldxp %[t1], %[t2], %[mem]\n\t"
+            "stxp %w[t1], %[l], %[h], %[mem]\n\t"
+            "cbnz %w[t1], 0b"
+            : [mem] "+m"(*ptr), [t1] "=&r"(t1), [t2] "=&r"(t2)
+            : [l] "r"(l), [h] "r"(h));
+    }
 }
 
 #endif /* AARCH64_ATOMIC128_LDST_H */
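
The patch assumes the `CPUINFO_LSE2` bit of `cpuinfo` has been populated at startup by the host cpuinfo code declared in `host/cpuinfo.h`. As a rough sketch of how such detection can work, not the commit's own code: on Linux/aarch64 the kernel reports FEAT_LSE2 (unaligned single-copy atomicity) through the `HWCAP_USCAT` auxval bit, and `host_has_lse2` below is a hypothetical probe built on that:

```c
#include <stdbool.h>
#include <sys/auxv.h>

#ifndef HWCAP_USCAT
#define HWCAP_USCAT (1 << 25)   /* FEAT_LSE2 hwcap bit on Linux/aarch64 */
#endif

/* Hypothetical probe: true when the kernel reports FEAT_LSE2,
 * i.e. 16-byte aligned LDP/STP are single-copy atomic. */
static bool host_has_lse2(void)
{
    return (getauxval(AT_HWCAP) & HWCAP_USCAT) != 0;
}
```

Note also the early-clobber (`=&r`) constraints on the ldxp/stxp fallback: the loop writes `tmp`, `l`, and `h` before its final use of the memory operand, so marking them early-clobber prevents the compiler from allocating them to the register holding the operand's address.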