author     Richard Henderson <richard.henderson@linaro.org>  2023-05-24 14:45:43 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-30 09:51:11 -0700
commit     b3f4144fa930655b302c45d5a9284eb7b26a34bc
tree       9b5eb2d496b73d666325c3650d364bb7c000a4d6 /host/include/generic
parent     af844a1149691f774caeff3265af905602a98645
accel/tcg: Extract store_atom_insert_al16 to host header
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'host/include/generic')
-rw-r--r--  host/include/generic/host/store-insert-al16.h  50
1 file changed, 50 insertions(+), 0 deletions(-)
diff --git a/host/include/generic/host/store-insert-al16.h b/host/include/generic/host/store-insert-al16.h
new file mode 100644
index 0000000..4a16621
--- /dev/null
+++ b/host/include/generic/host/store-insert-al16.h
@@ -0,0 +1,50 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ * Atomic store insert into 128-bit, generic version.
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef HOST_STORE_INSERT_AL16_H
+#define HOST_STORE_INSERT_AL16_H
+
+/**
+ * store_atom_insert_al16:
+ * @ps: host address
+ * @val: shifted value to store
+ * @msk: mask for value to store
+ *
+ * Atomically store @val to @ps masked by @msk.
+ */
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128 val, Int128 msk)
+{
+#if defined(CONFIG_ATOMIC128)
+ __uint128_t *pu;
+ Int128Alias old, new;
+
+ /* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
+ pu = __builtin_assume_aligned(ps, 16);
+ old.u = *pu;
+ msk = int128_not(msk);
+ do {
+ new.s = int128_and(old.s, msk);
+ new.s = int128_or(new.s, val);
+ } while (!__atomic_compare_exchange_n(pu, &old.u, new.u, true,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+#else
+ Int128 old, new, cmp;
+
+ ps = __builtin_assume_aligned(ps, 16);
+ old = *ps;
+ msk = int128_not(msk);
+ do {
+ cmp = old;
+ new = int128_and(old, msk);
+ new = int128_or(new, val);
+ old = atomic16_cmpxchg(ps, cmp, new);
+ } while (int128_ne(cmp, old));
+#endif
+}
+
+#endif /* HOST_STORE_INSERT_AL16_H */
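
For context: both branches of the patch implement the same 16-byte-aligned atomic
read-modify-write, new = (old & ~msk) | val, retried until the compare-and-swap
succeeds. Below is a minimal standalone sketch of the CONFIG_ATOMIC128 path, not
part of the patch: it substitutes plain __uint128_t for QEMU's Int128/Int128Alias
wrappers and assumes a host compiler with lock-free 16-byte __atomic builtins
(e.g. x86-64 built with -mcx16). The names store_insert_al16_sketch and the
main() driver are hypothetical, chosen for illustration only.

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static void
store_insert_al16_sketch(__uint128_t *ps, __uint128_t val, __uint128_t msk)
{
    __uint128_t *pu = __builtin_assume_aligned(ps, 16);
    __uint128_t old = *pu;
    __uint128_t new;

    /* Retry new = (old & ~msk) | val until no concurrent store intervenes. */
    do {
        new = (old & ~msk) | val;
    } while (!__atomic_compare_exchange_n(pu, &old, new, true,
                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
}

int main(void)
{
    /* Insert a 4-byte value at byte offset 4 of a zeroed 16-byte granule. */
    _Alignas(16) __uint128_t granule = 0;
    unsigned shift = 4 * 8;
    __uint128_t val = (__uint128_t)0xdeadbeefu << shift;
    __uint128_t msk = (__uint128_t)0xffffffffu << shift;

    store_insert_al16_sketch(&granule, val, msk);

    uint64_t lo;
    memcpy(&lo, &granule, sizeof(lo));              /* low half, little-endian host */
    printf("low 8 bytes: 0x%016" PRIx64 "\n", lo);  /* prints 0xdeadbeef00000000 */
    return 0;
}

As the doc comment notes, callers are expected to pass @val already shifted into
position within the 16-byte granule, with @msk covering exactly those bits, so
the loop above is the whole algorithm. The #else branch of the patch expresses
the same loop through QEMU's atomic16_cmpxchg() helper for hosts where the
compiler does not provide usable 128-bit atomic builtins.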