author     Richard Henderson <richard.henderson@linaro.org>   2022-11-09 00:23:44 +1100
committer  Richard Henderson <richard.henderson@linaro.org>   2023-02-04 06:19:42 -1000
commit     123ae5683c9e7815857304fd2f21664621c90a13
tree       1f26d3d871ddc58e58e2d3dacb9cd31a464209b1   /accel/tcg/atomic_common.c.inc
parent     cb48f3654e290ee5d7cbf1fb31888463fa2a180c
tcg: Add tcg_gen_{non}atomic_cmpxchg_i128
This will allow targets to avoid rolling their own.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
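As a sketch of what "rolling their own" previously meant, a target frontend can now hand a 16-byte compare-and-swap to the new common entry point. The fragment below is illustrative only: gen_cmpxchg16(), DisasContext and the mem_idx field are invented target-side names, and the exact prototype of tcg_gen_atomic_cmpxchg_i128() comes from the tcg/tcg-op.h part of this patch, which lies outside the diffstat shown here.

/*
 * Hypothetical target translate.c fragment (invented names, for
 * illustration only): emit a 16-byte CAS through the generic expander
 * instead of a per-target helper.  Assumes a
 * (ret, addr, cmp, new, mmu_idx, memop) argument order; check
 * tcg/tcg-op.h in the full patch for the actual prototype.
 */
static void gen_cmpxchg16(DisasContext *s, TCGv addr,
                          TCGv_i64 cmp_lo, TCGv_i64 cmp_hi,
                          TCGv_i64 new_lo, TCGv_i64 new_hi,
                          TCGv_i64 old_lo, TCGv_i64 old_hi)
{
    TCGv_i128 cmpv = tcg_temp_new_i128();
    TCGv_i128 newv = tcg_temp_new_i128();
    TCGv_i128 oldv = tcg_temp_new_i128();

    /* Pack the 64-bit halves into 128-bit TCG temporaries.  */
    tcg_gen_concat_i64_i128(cmpv, cmp_lo, cmp_hi);
    tcg_gen_concat_i64_i128(newv, new_lo, new_hi);

    /* One call covers both the parallel (atomic) and serial paths.  */
    tcg_gen_atomic_cmpxchg_i128(oldv, addr, cmpv, newv,
                                s->mem_idx, MO_128 | MO_ALIGN);

    /* Return the old value to the guest's result registers.  */
    tcg_gen_extr_i128_i64(old_lo, old_hi, oldv);
}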
Diffstat (limited to 'accel/tcg/atomic_common.c.inc')
-rw-r--r--  accel/tcg/atomic_common.c.inc | 45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
index 6602d76..8f2ce43 100644
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -55,8 +55,53 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
#endif
+#ifdef CONFIG_CMPXCHG128
+CMPXCHG_HELPER(cmpxchgo_be, Int128)
+CMPXCHG_HELPER(cmpxchgo_le, Int128)
+#endif
+
#undef CMPXCHG_HELPER
+Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
+ Int128 cmpv, Int128 newv, uint32_t oi)
+{
+#if TCG_TARGET_REG_BITS == 32
+ uintptr_t ra = GETPC();
+ Int128 oldv;
+
+ oldv = cpu_ld16_be_mmu(env, addr, oi, ra);
+ if (int128_eq(oldv, cmpv)) {
+ cpu_st16_be_mmu(env, addr, newv, oi, ra);
+ } else {
+ /* Even with comparison failure, still need a write cycle. */
+ probe_write(env, addr, 16, get_mmuidx(oi), ra);
+ }
+ return oldv;
+#else
+ g_assert_not_reached();
+#endif
+}
+
+Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
+ Int128 cmpv, Int128 newv, uint32_t oi)
+{
+#if TCG_TARGET_REG_BITS == 32
+ uintptr_t ra = GETPC();
+ Int128 oldv;
+
+ oldv = cpu_ld16_le_mmu(env, addr, oi, ra);
+ if (int128_eq(oldv, cmpv)) {
+ cpu_st16_le_mmu(env, addr, newv, oi, ra);
+ } else {
+ /* Even with comparison failure, still need a write cycle. */
+ probe_write(env, addr, 16, get_mmuidx(oi), ra);
+ }
+ return oldv;
+#else
+ g_assert_not_reached();
+#endif
+}
+
#define ATOMIC_HELPER(OP, TYPE) \
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
TYPE val, uint32_t oi) \
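For readers of just this hunk: CMPXCHG_HELPER() is defined a few lines above the visible context and is not changed by this patch. Roughly, each instantiation forwards to the matching cpu_atomic_*_mmu() primitive, so the two new Int128 uses produce helper_atomic_cmpxchgo_be() and helper_atomic_cmpxchgo_le(). The sketch below is an approximation reconstructed from the surrounding file, not part of this diff.

/*
 * Approximate shape of the macro instantiated above (defined earlier
 * in atomic_common.c.inc): each use emits a helper that forwards to
 * the corresponding cpu_atomic_<op>_mmu() primitive, passing the
 * caller's return address for unwinding.
 */
#define CMPXCHG_HELPER(OP, TYPE)                                          \
    TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr,        \
                             TYPE oldv, TYPE newv, uint32_t oi)           \
    { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }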