author    Richard Henderson <richard.henderson@linaro.org>  2023-05-16 21:30:27 -0700
committer Richard Henderson <richard.henderson@linaro.org>  2023-05-16 21:30:27 -0700
commit    6972ef1440a9d685482d78672620a7482f2bd09a (patch)
tree      927a0f6eba5c6400d74d1883ee9f3e5427696e25 /accel
parent    f9d58e0ca53b3f470b84725a7b5e47fcf446a2ea (diff)
parent    7d478306e84259678b2941e8af7496ef32a9c4c5 (diff)
Merge tag 'pull-tcg-20230516-3' of https://gitlab.com/rth7680/qemu into staging
tcg/i386: Fix tcg_out_addi_ptr for win64
tcg: Implement atomicity for TCGv_i128
tcg: First quarter of cleanups for building tcg once

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmRkWC8dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/I+wf8CUF+J/E9u0EuurrB
# 1asDicANUJIACnqlcEpSPKuSMtbzo1RDTQUR+d3GWJjyLASvSJZFZTQqWBdACRpc
# sNuDz3/1a6FbiM14CwIVmPpcjQXa+18Ck670Chmw51KyEt2xyDJTySFIGEqjiuTf
# YVDBbOs8neFZdcDvAs1qNUTjhRj4nNtkpQoBpv0tGH7E0CzPp6OcvxwfieVyLOIa
# Cy1ELM3aMyVN5MTjnORYLK70Pa9emdjB88SlypZx363ARKC7B50lzYPQ4E5zrOZq
# FKrOq5nFWLCtn4BID0R+jUmuUP6znR/hTlToDmf/9B4j9TUivERWlc54lz3YU6Gn
# su3FKg==
# =LVOb
# -----END PGP SIGNATURE-----
# gpg: Signature made Tue 16 May 2023 09:29:35 PM PDT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-tcg-20230516-3' of https://gitlab.com/rth7680/qemu: (74 commits)
  tcg: Split out exec/user/guest-base.h
  tcg: Add tlb_dyn_max_bits to TCGContext
  tcg: Add page_bits and page_mask to TCGContext
  tcg: Remove TARGET_LONG_BITS, TCG_TYPE_TL
  tcg/mips: Remove TARGET_LONG_BITS, TCG_TYPE_TL
  tcg/loongarch64: Remove TARGET_LONG_BITS, TCG_TYPE_TL
  tcg/aarch64: Remove TARGET_LONG_BITS, TCG_TYPE_TL
  tcg/aarch64: Remove USE_GUEST_BASE
  tcg/arm: Remove TARGET_LONG_BITS
  tcg/i386: Remove TARGET_LONG_BITS, TCG_TYPE_TL
  tcg/i386: Adjust type of tlb_mask
  tcg/i386: Conditionalize tcg_out_extu_i32_i64
  tcg/i386: Always enable TCG_TARGET_HAS_extr[lh]_i64_i32
  tcg/tci: Elimnate TARGET_LONG_BITS, target_ulong
  tcg: Split INDEX_op_qemu_{ld,st}* for guest address size
  tcg: Remove TCGv from tcg_gen_atomic_*
  tcg: Remove TCGv from tcg_gen_qemu_{ld,st}_*
  tcg: Add addr_type to TCGContext
  accel/tcg: Widen plugin_gen_empty_mem_callback to i64
  tcg: Reduce copies for plugin_gen_mem_callbacks
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
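Editorial note (not part of the commit): the headline change for accel/tcg is
that the per-endianness load/store entry points collapse into a single helper
per size, with the guest address always passed as uint64_t and the MemOpIdx
carrying size, endianness and the new atomicity flags. A minimal sketch of the
before/after shape, simplified from the diff below:

    /* Before: one entry point per endianness, checked against a fixed MemOp. */
    tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                        MemOpIdx oi, uintptr_t retaddr);
    tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                        MemOpIdx oi, uintptr_t retaddr);

    /* After: one entry point; the MemOpIdx is only sanity-checked for size,
     * and byte swapping / atomicity are resolved from it at run time. */
    tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                     MemOpIdx oi, uintptr_t retaddr);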
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/atomic_common.c.inc   |   14
-rw-r--r--  accel/tcg/cputlb.c              |  831
-rw-r--r--  accel/tcg/ldst_atomicity.c.inc  | 1262
-rw-r--r--  accel/tcg/plugin-gen.c          |   68
-rw-r--r--  accel/tcg/tcg-runtime.h         |   49
-rw-r--r--  accel/tcg/translate-all.c       |   35
-rw-r--r--  accel/tcg/user-exec.c           |  444
7 files changed, 2187 insertions, 516 deletions
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
index 8f2ce43..fe0eea0 100644
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -13,20 +13,20 @@
* See the COPYING file in the top-level directory.
*/
-static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
+static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}
#if HAVE_ATOMIC128
-static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
+static void atomic_trace_ld_post(CPUArchState *env, uint64_t addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
}
-static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
+static void atomic_trace_st_post(CPUArchState *env, uint64_t addr,
MemOpIdx oi)
{
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
@@ -40,7 +40,7 @@ static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
*/
#define CMPXCHG_HELPER(OP, TYPE) \
- TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr, \
+ TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr, \
TYPE oldv, TYPE newv, uint32_t oi) \
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
@@ -62,7 +62,7 @@ CMPXCHG_HELPER(cmpxchgo_le, Int128)
#undef CMPXCHG_HELPER
-Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
+Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, uint64_t addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
@@ -82,7 +82,7 @@ Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
#endif
}
-Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
+Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, uint64_t addr,
Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
@@ -103,7 +103,7 @@ Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
}
#define ATOMIC_HELPER(OP, TYPE) \
- TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr, \
+ TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr, \
TYPE val, uint32_t oi) \
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
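Editorial note (not part of the patch): with the new uint64_t address, an
invocation such as CMPXCHG_HELPER(cmpxchgo_le, Int128) from the surrounding
context expands to roughly the following (assuming the usual HELPER() naming,
which prefixes "helper_"):

    Int128 helper_atomic_cmpxchgo_le(CPUArchState *env, uint64_t addr,
                                     Int128 oldv, Int128 newv, uint32_t oi)
    {
        return cpu_atomic_cmpxchgo_le_mmu(env, addr, oldv, newv, oi, GETPC());
    }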
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 6177770..ae0fbcd 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -40,6 +40,7 @@
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"
+#include "exec/helper-proto.h"
/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
@@ -1668,6 +1669,9 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
return qemu_ram_addr_from_host_nofail(p);
}
+/* Load/store with atomicity primitives. */
+#include "ldst_atomicity.c.inc"
+
#ifdef CONFIG_PLUGIN
/*
* Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
@@ -2010,60 +2014,13 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
/*
- * Verify that we have passed the correct MemOp to the correct function.
- *
- * In the case of the helper_*_mmu functions, we will have done this by
- * using the MemOp to look up the helper during code generation.
- *
- * In the case of the cpu_*_mmu functions, this is up to the caller.
- * We could present one function to target code, and dispatch based on
- * the MemOp, but so far we have worked hard to avoid an indirect function
- * call along the memory path.
- */
-static void validate_memop(MemOpIdx oi, MemOp expected)
-{
-#ifdef CONFIG_DEBUG_TCG
- MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
- assert(have == expected);
-#endif
-}
-
-/*
* Load Helpers
*
* We support two different access types. SOFTMMU_CODE_ACCESS is
* specifically for reading instructions from system memory. It is
* called by the translation loop and in some helpers where the code
* is disassembled. It shouldn't be called directly by guest code.
- */
-
-typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr);
-
-static inline uint64_t QEMU_ALWAYS_INLINE
-load_memop(const void *haddr, MemOp op)
-{
- switch (op) {
- case MO_UB:
- return ldub_p(haddr);
- case MO_BEUW:
- return lduw_be_p(haddr);
- case MO_LEUW:
- return lduw_le_p(haddr);
- case MO_BEUL:
- return (uint32_t)ldl_be_p(haddr);
- case MO_LEUL:
- return (uint32_t)ldl_le_p(haddr);
- case MO_BEUQ:
- return ldq_be_p(haddr);
- case MO_LEUQ:
- return ldq_le_p(haddr);
- default:
- qemu_build_not_reached();
- }
-}
-
-/*
+ *
* For the benefit of TCG generated code, we want to avoid the
* complication of ABI-specific return type promotion and always
* return a value extended to the register size of the host. This is
@@ -2119,20 +2076,224 @@ static uint64_t do_ld_bytes_beN(MMULookupPageData *p, uint64_t ret_be)
return ret_be;
}
+/**
+ * do_ld_parts_beN
+ * @p: translation parameters
+ * @ret_be: accumulated data
+ *
+ * As do_ld_bytes_beN, but atomically on each aligned part.
+ */
+static uint64_t do_ld_parts_beN(MMULookupPageData *p, uint64_t ret_be)
+{
+ void *haddr = p->haddr;
+ int size = p->size;
+
+ do {
+ uint64_t x;
+ int n;
+
+ /*
+ * Find minimum of alignment and size.
+ * This is slightly stronger than required by MO_ATOM_SUBALIGN, which
+ * would have only checked the low bits of addr|size once at the start,
+ * but is just as easy.
+ */
+ switch (((uintptr_t)haddr | size) & 7) {
+ case 4:
+ x = cpu_to_be32(load_atomic4(haddr));
+ ret_be = (ret_be << 32) | x;
+ n = 4;
+ break;
+ case 2:
+ case 6:
+ x = cpu_to_be16(load_atomic2(haddr));
+ ret_be = (ret_be << 16) | x;
+ n = 2;
+ break;
+ default:
+ x = *(uint8_t *)haddr;
+ ret_be = (ret_be << 8) | x;
+ n = 1;
+ break;
+ case 0:
+ g_assert_not_reached();
+ }
+ haddr += n;
+ size -= n;
+ } while (size != 0);
+ return ret_be;
+}
+
+/**
+ * do_ld_whole_be4
+ * @p: translation parameters
+ * @ret_be: accumulated data
+ *
+ * As do_ld_bytes_beN, but with one atomic load.
+ * Four aligned bytes are guaranteed to cover the load.
+ */
+static uint64_t do_ld_whole_be4(MMULookupPageData *p, uint64_t ret_be)
+{
+ int o = p->addr & 3;
+ uint32_t x = load_atomic4(p->haddr - o);
+
+ x = cpu_to_be32(x);
+ x <<= o * 8;
+ x >>= (4 - p->size) * 8;
+ return (ret_be << (p->size * 8)) | x;
+}
+
+/**
+ * do_ld_whole_be8
+ * @p: translation parameters
+ * @ret_be: accumulated data
+ *
+ * As do_ld_bytes_beN, but with one atomic load.
+ * Eight aligned bytes are guaranteed to cover the load.
+ */
+static uint64_t do_ld_whole_be8(CPUArchState *env, uintptr_t ra,
+ MMULookupPageData *p, uint64_t ret_be)
+{
+ int o = p->addr & 7;
+ uint64_t x = load_atomic8_or_exit(env, ra, p->haddr - o);
+
+ x = cpu_to_be64(x);
+ x <<= o * 8;
+ x >>= (8 - p->size) * 8;
+ return (ret_be << (p->size * 8)) | x;
+}
+
+/**
+ * do_ld_whole_be16
+ * @p: translation parameters
+ * @ret_be: accumulated data
+ *
+ * As do_ld_bytes_beN, but with one atomic load.
+ * 16 aligned bytes are guaranteed to cover the load.
+ */
+static Int128 do_ld_whole_be16(CPUArchState *env, uintptr_t ra,
+ MMULookupPageData *p, uint64_t ret_be)
+{
+ int o = p->addr & 15;
+ Int128 x, y = load_atomic16_or_exit(env, ra, p->haddr - o);
+ int size = p->size;
+
+ if (!HOST_BIG_ENDIAN) {
+ y = bswap128(y);
+ }
+ y = int128_lshift(y, o * 8);
+ y = int128_urshift(y, (16 - size) * 8);
+ x = int128_make64(ret_be);
+ x = int128_lshift(x, size * 8);
+ return int128_or(x, y);
+}
+
/*
* Wrapper for the above.
*/
static uint64_t do_ld_beN(CPUArchState *env, MMULookupPageData *p,
- uint64_t ret_be, int mmu_idx,
- MMUAccessType type, uintptr_t ra)
+ uint64_t ret_be, int mmu_idx, MMUAccessType type,
+ MemOp mop, uintptr_t ra)
{
+ MemOp atom;
+ unsigned tmp, half_size;
+
if (unlikely(p->flags & TLB_MMIO)) {
return do_ld_mmio_beN(env, p, ret_be, mmu_idx, type, ra);
- } else {
+ }
+
+ /*
+ * It is a given that we cross a page and therefore there is no
+ * atomicity for the load as a whole, but subobjects may need attention.
+ */
+ atom = mop & MO_ATOM_MASK;
+ switch (atom) {
+ case MO_ATOM_SUBALIGN:
+ return do_ld_parts_beN(p, ret_be);
+
+ case MO_ATOM_IFALIGN_PAIR:
+ case MO_ATOM_WITHIN16_PAIR:
+ tmp = mop & MO_SIZE;
+ tmp = tmp ? tmp - 1 : 0;
+ half_size = 1 << tmp;
+ if (atom == MO_ATOM_IFALIGN_PAIR
+ ? p->size == half_size
+ : p->size >= half_size) {
+ if (!HAVE_al8_fast && p->size < 4) {
+ return do_ld_whole_be4(p, ret_be);
+ } else {
+ return do_ld_whole_be8(env, ra, p, ret_be);
+ }
+ }
+ /* fall through */
+
+ case MO_ATOM_IFALIGN:
+ case MO_ATOM_WITHIN16:
+ case MO_ATOM_NONE:
return do_ld_bytes_beN(p, ret_be);
+
+ default:
+ g_assert_not_reached();
}
}
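/*
 * Editorial worked example (not part of the patch): consider an 8-byte load
 * with MO_ATOM_IFALIGN_PAIR split 4+4 across the page boundary.  MO_SIZE is
 * MO_64 (3), so tmp = 2 and half_size = 4; each page then sees
 * p->size == half_size, and its 4-byte half is fetched with a single atomic
 * 8-byte access via do_ld_whole_be8() and the relevant bytes extracted.
 * (A 4-byte load split 2+2 would use do_ld_whole_be4() on hosts without
 * fast 8-byte atomics.)  A 3+5 split fails p->size == half_size on both
 * pages, so neither half requires atomicity and the code falls through to
 * the byte-at-a-time do_ld_bytes_beN().
 */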
+/*
+ * Wrapper for the above, for 8 < size < 16.
+ */
+static Int128 do_ld16_beN(CPUArchState *env, MMULookupPageData *p,
+ uint64_t a, int mmu_idx, MemOp mop, uintptr_t ra)
+{
+ int size = p->size;
+ uint64_t b;
+ MemOp atom;
+
+ if (unlikely(p->flags & TLB_MMIO)) {
+ p->size = size - 8;
+ a = do_ld_mmio_beN(env, p, a, mmu_idx, MMU_DATA_LOAD, ra);
+ p->addr += p->size;
+ p->size = 8;
+ b = do_ld_mmio_beN(env, p, 0, mmu_idx, MMU_DATA_LOAD, ra);
+ return int128_make128(b, a);
+ }
+
+ /*
+ * It is a given that we cross a page and therefore there is no
+ * atomicity for the load as a whole, but subobjects may need attention.
+ */
+ atom = mop & MO_ATOM_MASK;
+ switch (atom) {
+ case MO_ATOM_SUBALIGN:
+ p->size = size - 8;
+ a = do_ld_parts_beN(p, a);
+ p->haddr += size - 8;
+ p->size = 8;
+ b = do_ld_parts_beN(p, 0);
+ break;
+
+ case MO_ATOM_WITHIN16_PAIR:
+ /* Since size > 8, this is the half that must be atomic. */
+ return do_ld_whole_be16(env, ra, p, a);
+
+ case MO_ATOM_IFALIGN_PAIR:
+ /*
+ * Since size > 8, both halves are misaligned,
+ * and so neither is atomic.
+ */
+ case MO_ATOM_IFALIGN:
+ case MO_ATOM_WITHIN16:
+ case MO_ATOM_NONE:
+ p->size = size - 8;
+ a = do_ld_bytes_beN(p, a);
+ b = ldq_be_p(p->haddr + size - 8);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ return int128_make128(b, a);
+}
+
static uint8_t do_ld_1(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
MMUAccessType type, uintptr_t ra)
{
@@ -2153,7 +2314,7 @@ static uint16_t do_ld_2(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
}
/* Perform the load host endian, then swap if necessary. */
- ret = load_memop(p->haddr, MO_UW);
+ ret = load_atom_2(env, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap16(ret);
}
@@ -2170,7 +2331,7 @@ static uint32_t do_ld_4(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
}
/* Perform the load host endian. */
- ret = load_memop(p->haddr, MO_UL);
+ ret = load_atom_4(env, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap32(ret);
}
@@ -2187,7 +2348,7 @@ static uint64_t do_ld_8(CPUArchState *env, MMULookupPageData *p, int mmu_idx,
}
/* Perform the load host endian. */
- ret = load_memop(p->haddr, MO_UQ);
+ ret = load_atom_8(env, ra, p->haddr, memop);
if (memop & MO_BSWAP) {
ret = bswap64(ret);
}
@@ -2206,10 +2367,10 @@ static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
}
-tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_UB);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
@@ -2237,17 +2398,10 @@ static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return ret;
}
-tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_LEUW);
- return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_BEUW);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
@@ -2263,25 +2417,18 @@ static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return do_ld_4(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, ra);
- ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, ra);
+ ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap32(ret);
}
return ret;
}
-tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_LEUL);
- return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_BEUL);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
@@ -2297,25 +2444,18 @@ static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return do_ld_8(env, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
}
- ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, ra);
- ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, ra);
+ ret = do_ld_beN(env, &l.page[0], 0, l.mmu_idx, access_type, l.memop, ra);
+ ret = do_ld_beN(env, &l.page[1], ret, l.mmu_idx, access_type, l.memop, ra);
if ((l.memop & MO_BSWAP) == MO_LE) {
ret = bswap64(ret);
}
return ret;
}
-uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_LEUQ);
- return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEUQ);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
@@ -2324,35 +2464,96 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
* avoid this for 64-bit data, or for 32-bit data on 32-bit host.
*/
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
+}
-tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
+ return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
}
-tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
+ return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
}
-tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+static Int128 do_ld16_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t ra)
{
- return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
+ MMULookupLocals l;
+ bool crosspage;
+ uint64_t a, b;
+ Int128 ret;
+ int first;
+
+ crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD, &l);
+ if (likely(!crosspage)) {
+ /* Perform the load host endian. */
+ if (unlikely(l.page[0].flags & TLB_MMIO)) {
+ QEMU_IOTHREAD_LOCK_GUARD();
+ a = io_readx(env, l.page[0].full, l.mmu_idx, addr,
+ ra, MMU_DATA_LOAD, MO_64);
+ b = io_readx(env, l.page[0].full, l.mmu_idx, addr + 8,
+ ra, MMU_DATA_LOAD, MO_64);
+ ret = int128_make128(HOST_BIG_ENDIAN ? b : a,
+ HOST_BIG_ENDIAN ? a : b);
+ } else {
+ ret = load_atom_16(env, ra, l.page[0].haddr, l.memop);
+ }
+ if (l.memop & MO_BSWAP) {
+ ret = bswap128(ret);
+ }
+ return ret;
+ }
+
+ first = l.page[0].size;
+ if (first == 8) {
+ MemOp mop8 = (l.memop & ~MO_SIZE) | MO_64;
+
+ a = do_ld_8(env, &l.page[0], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
+ b = do_ld_8(env, &l.page[1], l.mmu_idx, MMU_DATA_LOAD, mop8, ra);
+ if ((mop8 & MO_BSWAP) == MO_LE) {
+ ret = int128_make128(a, b);
+ } else {
+ ret = int128_make128(b, a);
+ }
+ return ret;
+ }
+
+ if (first < 8) {
+ a = do_ld_beN(env, &l.page[0], 0, l.mmu_idx,
+ MMU_DATA_LOAD, l.memop, ra);
+ ret = do_ld16_beN(env, &l.page[1], a, l.mmu_idx, l.memop, ra);
+ } else {
+ ret = do_ld16_beN(env, &l.page[0], 0, l.mmu_idx, l.memop, ra);
+ b = int128_getlo(ret);
+ ret = int128_lshift(ret, l.page[1].size * 8);
+ a = int128_gethi(ret);
+ b = do_ld_beN(env, &l.page[1], b, l.mmu_idx,
+ MMU_DATA_LOAD, l.memop, ra);
+ ret = int128_make128(b, a);
+ }
+ if ((l.memop & MO_BSWAP) == MO_LE) {
+ ret = bswap128(ret);
+ }
+ return ret;
}
-tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+ uint32_t oi, uintptr_t retaddr)
{
- return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+ return do_ld16_mmu(env, addr, oi, retaddr);
}
-tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
{
- return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
+ return helper_ld16_mmu(env, addr, oi, GETPC());
}
/*
@@ -2368,7 +2569,7 @@ uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
uint8_t ret;
- validate_memop(oi, MO_UB);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2379,7 +2580,7 @@ uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
{
uint16_t ret;
- validate_memop(oi, MO_BEUW);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW);
ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2390,7 +2591,7 @@ uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
{
uint32_t ret;
- validate_memop(oi, MO_BEUL);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL);
ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2401,7 +2602,7 @@ uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
{
uint64_t ret;
- validate_memop(oi, MO_BEUQ);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ);
ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2412,7 +2613,7 @@ uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
{
uint16_t ret;
- validate_memop(oi, MO_LEUW);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW);
ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2423,7 +2624,7 @@ uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
{
uint32_t ret;
- validate_memop(oi, MO_LEUL);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL);
ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2434,7 +2635,7 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
{
uint64_t ret;
- validate_memop(oi, MO_LEUQ);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ);
ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2443,95 +2644,29 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- int mmu_idx = get_mmuidx(oi);
- MemOpIdx new_oi;
- unsigned a_bits;
- uint64_t h, l;
-
- tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
- a_bits = get_alignment_bits(mop);
-
- /* Handle CPU specific unaligned behaviour */
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
- mmu_idx, ra);
- }
+ Int128 ret;
- /* Construct an unaligned 64-bit replacement MemOpIdx. */
- mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
- new_oi = make_memop_idx(mop, mmu_idx);
-
- h = helper_be_ldq_mmu(env, addr, new_oi, ra);
- l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra);
-
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return int128_make128(l, h);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_BE|MO_128));
+ ret = do_ld16_mmu(env, addr, oi, ra);
+ plugin_load_cb(env, addr, oi);
+ return ret;
}
Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- int mmu_idx = get_mmuidx(oi);
- MemOpIdx new_oi;
- unsigned a_bits;
- uint64_t h, l;
-
- tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
- a_bits = get_alignment_bits(mop);
+ Int128 ret;
- /* Handle CPU specific unaligned behaviour */
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
- mmu_idx, ra);
- }
-
- /* Construct an unaligned 64-bit replacement MemOpIdx. */
- mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
- new_oi = make_memop_idx(mop, mmu_idx);
-
- l = helper_le_ldq_mmu(env, addr, new_oi, ra);
- h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra);
-
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return int128_make128(l, h);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_LE|MO_128));
+ ret = do_ld16_mmu(env, addr, oi, ra);
+ plugin_load_cb(env, addr, oi);
+ return ret;
}
/*
* Store Helpers
*/
-static inline void QEMU_ALWAYS_INLINE
-store_memop(void *haddr, uint64_t val, MemOp op)
-{
- switch (op) {
- case MO_UB:
- stb_p(haddr, val);
- break;
- case MO_BEUW:
- stw_be_p(haddr, val);
- break;
- case MO_LEUW:
- stw_le_p(haddr, val);
- break;
- case MO_BEUL:
- stl_be_p(haddr, val);
- break;
- case MO_LEUL:
- stl_le_p(haddr, val);
- break;
- case MO_BEUQ:
- stq_be_p(haddr, val);
- break;
- case MO_LEUQ:
- stq_le_p(haddr, val);
- break;
- default:
- qemu_build_not_reached();
- }
-}
-
/**
* do_st_mmio_leN:
* @env: cpu context
@@ -2558,38 +2693,110 @@ static uint64_t do_st_mmio_leN(CPUArchState *env, MMULookupPageData *p,
return val_le;
}
-/**
- * do_st_bytes_leN:
- * @p: translation parameters
- * @val_le: data to store
- *
- * Store @p->size bytes at @p->haddr, which is RAM.
- * The bytes to store are extracted in little-endian order from @val_le;
- * return the bytes of @val_le beyond @p->size that have not been stored.
+/*
+ * Wrapper for the above.
*/
-static uint64_t do_st_bytes_leN(MMULookupPageData *p, uint64_t val_le)
+static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
+ uint64_t val_le, int mmu_idx,
+ MemOp mop, uintptr_t ra)
{
- uint8_t *haddr = p->haddr;
- int i, size = p->size;
+ MemOp atom;
+ unsigned tmp, half_size;
- for (i = 0; i < size; i++, val_le >>= 8) {
- haddr[i] = val_le;
+ if (unlikely(p->flags & TLB_MMIO)) {
+ return do_st_mmio_leN(env, p, val_le, mmu_idx, ra);
+ } else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
+ return val_le >> (p->size * 8);
+ }
+
+ /*
+ * It is a given that we cross a page and therefore there is no atomicity
+ * for the store as a whole, but subobjects may need attention.
+ */
+ atom = mop & MO_ATOM_MASK;
+ switch (atom) {
+ case MO_ATOM_SUBALIGN:
+ return store_parts_leN(p->haddr, p->size, val_le);
+
+ case MO_ATOM_IFALIGN_PAIR:
+ case MO_ATOM_WITHIN16_PAIR:
+ tmp = mop & MO_SIZE;
+ tmp = tmp ? tmp - 1 : 0;
+ half_size = 1 << tmp;
+ if (atom == MO_ATOM_IFALIGN_PAIR
+ ? p->size == half_size
+ : p->size >= half_size) {
+ if (!HAVE_al8_fast && p->size <= 4) {
+ return store_whole_le4(p->haddr, p->size, val_le);
+ } else if (HAVE_al8) {
+ return store_whole_le8(p->haddr, p->size, val_le);
+ } else {
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ }
+ }
+ /* fall through */
+
+ case MO_ATOM_IFALIGN:
+ case MO_ATOM_WITHIN16:
+ case MO_ATOM_NONE:
+ return store_bytes_leN(p->haddr, p->size, val_le);
+
+ default:
+ g_assert_not_reached();
}
- return val_le;
}
/*
- * Wrapper for the above.
+ * Wrapper for the above, for 8 < size < 16.
*/
-static uint64_t do_st_leN(CPUArchState *env, MMULookupPageData *p,
- uint64_t val_le, int mmu_idx, uintptr_t ra)
+static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
+ Int128 val_le, int mmu_idx,
+ MemOp mop, uintptr_t ra)
{
+ int size = p->size;
+ MemOp atom;
+
if (unlikely(p->flags & TLB_MMIO)) {
- return do_st_mmio_leN(env, p, val_le, mmu_idx, ra);
+ p->size = 8;
+ do_st_mmio_leN(env, p, int128_getlo(val_le), mmu_idx, ra);
+ p->size = size - 8;
+ p->addr += 8;
+ return do_st_mmio_leN(env, p, int128_gethi(val_le), mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
- return val_le >> (p->size * 8);
- } else {
- return do_st_bytes_leN(p, val_le);
+ return int128_gethi(val_le) >> ((size - 8) * 8);
+ }
+
+ /*
+ * It is a given that we cross a page and therefore there is no atomicity
+ * for the store as a whole, but subobjects may need attention.
+ */
+ atom = mop & MO_ATOM_MASK;
+ switch (atom) {
+ case MO_ATOM_SUBALIGN:
+ store_parts_leN(p->haddr, 8, int128_getlo(val_le));
+ return store_parts_leN(p->haddr + 8, p->size - 8,
+ int128_gethi(val_le));
+
+ case MO_ATOM_WITHIN16_PAIR:
+ /* Since size > 8, this is the half that must be atomic. */
+ if (!HAVE_al16) {
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ }
+ return store_whole_le16(p->haddr, p->size, val_le);
+
+ case MO_ATOM_IFALIGN_PAIR:
+ /*
+ * Since size > 8, both halves are misaligned,
+ * and so neither is atomic.
+ */
+ case MO_ATOM_IFALIGN:
+ case MO_ATOM_NONE:
+ stq_le_p(p->haddr, int128_getlo(val_le));
+ return store_bytes_leN(p->haddr + 8, p->size - 8,
+ int128_gethi(val_le));
+
+ default:
+ g_assert_not_reached();
}
}
@@ -2617,7 +2824,7 @@ static void do_st_2(CPUArchState *env, MMULookupPageData *p, uint16_t val,
if (memop & MO_BSWAP) {
val = bswap16(val);
}
- store_memop(p->haddr, val, MO_UW);
+ store_atom_2(env, ra, p->haddr, memop, val);
}
}
@@ -2633,7 +2840,7 @@ static void do_st_4(CPUArchState *env, MMULookupPageData *p, uint32_t val,
if (memop & MO_BSWAP) {
val = bswap32(val);
}
- store_memop(p->haddr, val, MO_UL);
+ store_atom_4(env, ra, p->haddr, memop, val);
}
}
@@ -2649,17 +2856,17 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
if (memop & MO_BSWAP) {
val = bswap64(val);
}
- store_memop(p->haddr, val, MO_UQ);
+ store_atom_8(env, ra, p->haddr, memop, val);
}
}
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
+void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
- validate_memop(oi, MO_UB);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);
@@ -2688,17 +2895,10 @@ static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
}
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_LEUW);
- do_st2_mmu(env, addr, val, oi, retaddr);
-}
-
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEUW);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
do_st2_mmu(env, addr, val, oi, retaddr);
}
@@ -2718,21 +2918,14 @@ static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap32(val);
}
- val = do_st_leN(env, &l.page[0], val, l.mmu_idx, ra);
- (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, ra);
+ val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_LEUL);
- do_st4_mmu(env, addr, val, oi, retaddr);
-}
-
-void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEUL);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
do_st4_mmu(env, addr, val, oi, retaddr);
}
@@ -2752,22 +2945,88 @@ static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
if ((l.memop & MO_BSWAP) != MO_LE) {
val = bswap64(val);
}
- val = do_st_leN(env, &l.page[0], val, l.mmu_idx, ra);
- (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, ra);
+ val = do_st_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ (void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_LEUQ);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
do_st8_mmu(env, addr, val, oi, retaddr);
}
-void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
+static void do_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+ MemOpIdx oi, uintptr_t ra)
{
- validate_memop(oi, MO_BEUQ);
- do_st8_mmu(env, addr, val, oi, retaddr);
+ MMULookupLocals l;
+ bool crosspage;
+ uint64_t a, b;
+ int first;
+
+ crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
+ if (likely(!crosspage)) {
+ /* Swap to host endian if necessary, then store. */
+ if (l.memop & MO_BSWAP) {
+ val = bswap128(val);
+ }
+ if (unlikely(l.page[0].flags & TLB_MMIO)) {
+ QEMU_IOTHREAD_LOCK_GUARD();
+ if (HOST_BIG_ENDIAN) {
+ b = int128_getlo(val), a = int128_gethi(val);
+ } else {
+ a = int128_getlo(val), b = int128_gethi(val);
+ }
+ io_writex(env, l.page[0].full, l.mmu_idx, a, addr, ra, MO_64);
+ io_writex(env, l.page[0].full, l.mmu_idx, b, addr + 8, ra, MO_64);
+ } else if (unlikely(l.page[0].flags & TLB_DISCARD_WRITE)) {
+ /* nothing */
+ } else {
+ store_atom_16(env, ra, l.page[0].haddr, l.memop, val);
+ }
+ return;
+ }
+
+ first = l.page[0].size;
+ if (first == 8) {
+ MemOp mop8 = (l.memop & ~(MO_SIZE | MO_BSWAP)) | MO_64;
+
+ if (l.memop & MO_BSWAP) {
+ val = bswap128(val);
+ }
+ if (HOST_BIG_ENDIAN) {
+ b = int128_getlo(val), a = int128_gethi(val);
+ } else {
+ a = int128_getlo(val), b = int128_gethi(val);
+ }
+ do_st_8(env, &l.page[0], a, l.mmu_idx, mop8, ra);
+ do_st_8(env, &l.page[1], b, l.mmu_idx, mop8, ra);
+ return;
+ }
+
+ if ((l.memop & MO_BSWAP) != MO_LE) {
+ val = bswap128(val);
+ }
+ if (first < 8) {
+ do_st_leN(env, &l.page[0], int128_getlo(val), l.mmu_idx, l.memop, ra);
+ val = int128_urshift(val, first * 8);
+ do_st16_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
+ } else {
+ b = do_st16_leN(env, &l.page[0], val, l.mmu_idx, l.memop, ra);
+ do_st_leN(env, &l.page[1], b, l.mmu_idx, l.memop, ra);
+ }
+}
+
+void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+ do_st16_mmu(env, addr, val, oi, retaddr);
+}
+
+void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
+{
+ helper_st16_mmu(env, addr, val, oi, GETPC());
}
/*
@@ -2782,104 +3041,72 @@ static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_ret_stb_mmu(env, addr, val, oi, retaddr);
+ helper_stb_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_be_stw_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW);
+ do_st2_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_be_stl_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL);
+ do_st4_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_be_stq_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ);
+ do_st8_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_le_stw_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW);
+ do_st2_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_le_stl_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL);
+ do_st4_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_le_stq_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ);
+ do_st8_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
-void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
- MemOpIdx oi, uintptr_t ra)
+void cpu_st16_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- MemOp mop = get_memop(oi);
- int mmu_idx = get_mmuidx(oi);
- MemOpIdx new_oi;
- unsigned a_bits;
-
- tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
- a_bits = get_alignment_bits(mop);
-
- /* Handle CPU specific unaligned behaviour */
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
- mmu_idx, ra);
- }
-
- /* Construct an unaligned 64-bit replacement MemOpIdx. */
- mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
- new_oi = make_memop_idx(mop, mmu_idx);
-
- helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra);
- helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra);
-
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_BE|MO_128));
+ do_st16_mmu(env, addr, val, oi, retaddr);
+ plugin_store_cb(env, addr, oi);
}
-void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
- MemOpIdx oi, uintptr_t ra)
+void cpu_st16_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- MemOp mop = get_memop(oi);
- int mmu_idx = get_mmuidx(oi);
- MemOpIdx new_oi;
- unsigned a_bits;
-
- tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
- a_bits = get_alignment_bits(mop);
-
- /* Handle CPU specific unaligned behaviour */
- if (addr & ((1 << a_bits) - 1)) {
- cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
- mmu_idx, ra);
- }
-
- /* Construct an unaligned 64-bit replacement MemOpIdx. */
- mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
- new_oi = make_memop_idx(mop, mmu_idx);
-
- helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra);
- helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra);
-
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_LE|MO_128));
+ do_st16_mmu(env, addr, val, oi, retaddr);
+ plugin_store_cb(env, addr, oi);
}
#include "ldst_common.c.inc"
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
new file mode 100644
index 0000000..ba5db7c
--- /dev/null
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -0,0 +1,1262 @@
+/*
+ * Routines common to user and system emulation of load/store.
+ *
+ * Copyright (c) 2022 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifdef CONFIG_ATOMIC64
+# define HAVE_al8 true
+#else
+# define HAVE_al8 false
+#endif
+#define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8)
+
+/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x. We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned, however prior to
+ * GCC 13 that was only reliable with optimization enabled. See
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
+#if defined(CONFIG_ATOMIC128)
+# define HAVE_al16_fast true
+#else
+# define HAVE_al16_fast false
+#endif
+#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
+# define HAVE_al16 true
+#else
+# define HAVE_al16 false
+#endif
+
+
+/**
+ * required_atomicity:
+ *
+ * Return the lg2 bytes of atomicity required by @memop for @p.
+ * If the operation must be split into two operations to be
+ * examined separately for atomicity, return -lg2.
+ */
+static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
+{
+ MemOp atom = memop & MO_ATOM_MASK;
+ MemOp size = memop & MO_SIZE;
+ MemOp half = size ? size - 1 : 0;
+ unsigned tmp;
+ int atmax;
+
+ switch (atom) {
+ case MO_ATOM_NONE:
+ atmax = MO_8;
+ break;
+
+ case MO_ATOM_IFALIGN_PAIR:
+ size = half;
+ /* fall through */
+
+ case MO_ATOM_IFALIGN:
+ tmp = (1 << size) - 1;
+ atmax = p & tmp ? MO_8 : size;
+ break;
+
+ case MO_ATOM_WITHIN16:
+ tmp = p & 15;
+ atmax = (tmp + (1 << size) <= 16 ? size : MO_8);
+ break;
+
+ case MO_ATOM_WITHIN16_PAIR:
+ tmp = p & 15;
+ if (tmp + (1 << size) <= 16) {
+ atmax = size;
+ } else if (tmp + (1 << half) == 16) {
+ /*
+ * The pair exactly straddles the boundary.
+ * Both halves are naturally aligned and atomic.
+ */
+ atmax = half;
+ } else {
+ /*
+ * One of the pair crosses the boundary, and is non-atomic.
+ * The other of the pair does not cross, and is atomic.
+ */
+ atmax = -half;
+ }
+ break;
+
+ case MO_ATOM_SUBALIGN:
+ /*
+ * Examine the alignment of p to determine if there are subobjects
+ * that must be aligned. Note that we only really need ctz4() --
+ * any more sigificant bits are discarded by the immediately
+ * following comparison.
+ */
+ tmp = ctz32(p);
+ atmax = MIN(size, tmp);
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+
+ /*
+ * Here we have the architectural atomicity of the operation.
+ * However, when executing in a serial context, we need no extra
+ * host atomicity in order to avoid racing. This reduction
+ * avoids looping with cpu_loop_exit_atomic.
+ */
+ if (cpu_in_serial_context(env_cpu(env))) {
+ return MO_8;
+ }
+ return atmax;
+}
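/*
 * Editorial worked examples (not part of the patch), with MO_8..MO_128
 * denoting lg2 sizes 0..4:
 *   - MO_64 | MO_ATOM_IFALIGN,        p % 8 == 0:  atmax = MO_64;
 *                                     p % 8 != 0:  atmax = MO_8.
 *   - MO_64 | MO_ATOM_WITHIN16,       p % 16 == 4: 4 + 8 <= 16, the load
 *                                     stays within one 16-byte block, so
 *                                     atmax = MO_64 despite the misalignment.
 *   - MO_128 | MO_ATOM_WITHIN16_PAIR, p % 16 == 8: the pair exactly
 *                                     straddles the block, atmax = MO_64;
 *                                     p % 16 == 4: one half crosses, so
 *                                     atmax = -MO_64 (split; one half atomic).
 *   - MO_32 | MO_ATOM_SUBALIGN,       p % 4 == 2:  ctz32(p) == 1, so
 *                                     atmax = MO_16.
 * In a serial context all of these reduce to MO_8, since no host atomicity
 * is needed to avoid races.
 */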
+
+/**
+ * load_atomic2:
+ * @pv: host address
+ *
+ * Atomically load 2 aligned bytes from @pv.
+ */
+static inline uint16_t load_atomic2(void *pv)
+{
+ uint16_t *p = __builtin_assume_aligned(pv, 2);
+ return qatomic_read(p);
+}
+
+/**
+ * load_atomic4:
+ * @pv: host address
+ *
+ * Atomically load 4 aligned bytes from @pv.
+ */
+static inline uint32_t load_atomic4(void *pv)
+{
+ uint32_t *p = __builtin_assume_aligned(pv, 4);
+ return qatomic_read(p);
+}
+
+/**
+ * load_atomic8:
+ * @pv: host address
+ *
+ * Atomically load 8 aligned bytes from @pv.
+ */
+static inline uint64_t load_atomic8(void *pv)
+{
+ uint64_t *p = __builtin_assume_aligned(pv, 8);
+
+ qemu_build_assert(HAVE_al8);
+ return qatomic_read__nocheck(p);
+}
+
+/**
+ * load_atomic16:
+ * @pv: host address
+ *
+ * Atomically load 16 aligned bytes from @pv.
+ */
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+load_atomic16(void *pv)
+{
+#ifdef CONFIG_ATOMIC128
+ __uint128_t *p = __builtin_assume_aligned(pv, 16);
+ Int128Alias r;
+
+ r.u = qatomic_read__nocheck(p);
+ return r.s;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+/**
+ * load_atomic8_or_exit:
+ * @env: cpu context
+ * @ra: host unwind address
+ * @pv: host address
+ *
+ * Atomically load 8 aligned bytes from @pv.
+ * If this is not possible, longjmp out to restart serially.
+ */
+static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+{
+ if (HAVE_al8) {
+ return load_atomic8(pv);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ /*
+ * If the page is not writable, then assume the value is immutable
+ * and requires no locking. This ignores the case of MAP_SHARED with
+ * another process, because the fallback start_exclusive solution
+ * provides no protection across processes.
+ */
+ if (!page_check_range(h2g(pv), 8, PAGE_WRITE)) {
+ uint64_t *p = __builtin_assume_aligned(pv, 8);
+ return *p;
+ }
+#endif
+
+ /* Ultimate fallback: re-execute in serial context. */
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+}
+
+/**
+ * load_atomic16_or_exit:
+ * @env: cpu context
+ * @ra: host unwind address
+ * @pv: host address
+ *
+ * Atomically load 16 aligned bytes from @pv.
+ * If this is not possible, longjmp out to restart serially.
+ */
+static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+{
+ Int128 *p = __builtin_assume_aligned(pv, 16);
+
+ if (HAVE_al16_fast) {
+ return load_atomic16(p);
+ }
+
+#ifdef CONFIG_USER_ONLY
+ /*
+ * We can only use cmpxchg to emulate a load if the page is writable.
+ * If the page is not writable, then assume the value is immutable
+ * and requires no locking. This ignores the case of MAP_SHARED with
+ * another process, because the fallback start_exclusive solution
+ * provides no protection across processes.
+ */
+ if (!page_check_range(h2g(p), 16, PAGE_WRITE)) {
+ return *p;
+ }
+#endif
+
+ /*
+ * In system mode all guest pages are writable, and for user-only
+ * we have just checked writability. Try cmpxchg.
+ */
+#if defined(CONFIG_CMPXCHG128)
+ /* Swap 0 with 0, with the side-effect of returning the old value. */
+ {
+ Int128Alias r;
+ r.u = __sync_val_compare_and_swap_16((__uint128_t *)p, 0, 0);
+ return r.s;
+ }
+#endif
+
+ /* Ultimate fallback: re-execute in serial context. */
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+}
+
+/**
+ * load_atom_extract_al4x2:
+ * @pv: host address
+ *
+ * Load 4 bytes from @p, from two sequential atomic 4-byte loads.
+ */
+static uint32_t load_atom_extract_al4x2(void *pv)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int sh = (pi & 3) * 8;
+ uint32_t a, b;
+
+ pv = (void *)(pi & ~3);
+ a = load_atomic4(pv);
+ b = load_atomic4(pv + 4);
+
+ if (HOST_BIG_ENDIAN) {
+ return (a << sh) | (b >> (-sh & 31));
+ } else {
+ return (a >> sh) | (b << (-sh & 31));
+ }
+}
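/*
 * Editorial worked example (not part of the patch): on a little-endian host
 * with p % 4 == 3, sh == 24.  'a' is the aligned word covering p-3..p and
 * 'b' the word covering p+1..p+4, so (a >> 24) contributes the byte at p and
 * (b << 8) contributes the bytes at p+1..p+3; together they reconstruct the
 * 4 bytes at p..p+3 from two individually atomic 4-byte loads.
 */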
+
+/**
+ * load_atom_extract_al8x2:
+ * @pv: host address
+ *
+ * Load 8 bytes from @p, from two sequential atomic 8-byte loads.
+ */
+static uint64_t load_atom_extract_al8x2(void *pv)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int sh = (pi & 7) * 8;
+ uint64_t a, b;
+
+ pv = (void *)(pi & ~7);
+ a = load_atomic8(pv);
+ b = load_atomic8(pv + 8);
+
+ if (HOST_BIG_ENDIAN) {
+ return (a << sh) | (b >> (-sh & 63));
+ } else {
+ return (a >> sh) | (b << (-sh & 63));
+ }
+}
+
+/**
+ * load_atom_extract_al8_or_exit:
+ * @env: cpu context
+ * @ra: host unwind address
+ * @pv: host address
+ * @s: object size in bytes, @s <= 4.
+ *
+ * Atomically load @s bytes from @p, when p % s != 0, and [p, p+s-1] does
+ * not cross an 8-byte boundary. This means that we can perform an atomic
+ * 8-byte load and extract.
+ * The value is returned in the low bits of a uint32_t.
+ */
+static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
+ void *pv, int s)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int o = pi & 7;
+ int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;
+
+ pv = (void *)(pi & ~7);
+ return load_atomic8_or_exit(env, ra, pv) >> shr;
+}
+
+/**
+ * load_atom_extract_al16_or_exit:
+ * @env: cpu context
+ * @ra: host unwind address
+ * @p: host address
+ * @s: object size in bytes, @s <= 8.
+ *
+ * Atomically load @s bytes from @p, when p % 16 < 8
+ * and p % 16 + s > 8. I.e. does not cross a 16-byte
+ * boundary, but *does* cross an 8-byte boundary.
+ * This is the slow version, so we must have eliminated
+ * any faster load_atom_extract_al8_or_exit case.
+ *
+ * If this is not possible, longjmp out to restart serially.
+ */
+static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
+ void *pv, int s)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int o = pi & 7;
+ int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;
+ Int128 r;
+
+ /*
+ * Note constraints above: p & 8 must be clear.
+ * Provoke SIGBUS if possible otherwise.
+ */
+ pv = (void *)(pi & ~7);
+ r = load_atomic16_or_exit(env, ra, pv);
+
+ r = int128_urshift(r, shr);
+ return int128_getlo(r);
+}
+
+/**
+ * load_atom_extract_al16_or_al8:
+ * @p: host address
+ * @s: object size in bytes, @s <= 8.
+ *
+ * Load @s bytes from @p, when p % s != 0. If [p, p+s-1] does not
+ * cross a 16-byte boundary then the access must be 16-byte atomic,
+ * otherwise the access must be 8-byte atomic.
+ */
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
+{
+#if defined(CONFIG_ATOMIC128)
+ uintptr_t pi = (uintptr_t)pv;
+ int o = pi & 7;
+ int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;
+ __uint128_t r;
+
+ pv = (void *)(pi & ~7);
+ if (pi & 8) {
+ uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
+ uint64_t a = qatomic_read__nocheck(p8);
+ uint64_t b = qatomic_read__nocheck(p8 + 1);
+
+ if (HOST_BIG_ENDIAN) {
+ r = ((__uint128_t)a << 64) | b;
+ } else {
+ r = ((__uint128_t)b << 64) | a;
+ }
+ } else {
+ __uint128_t *p16 = __builtin_assume_aligned(pv, 16, 0);
+ r = qatomic_read__nocheck(p16);
+ }
+ return r >> shr;
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+/**
+ * load_atom_4_by_2:
+ * @pv: host address
+ *
+ * Load 4 bytes from @pv, with two 2-byte atomic loads.
+ */
+static inline uint32_t load_atom_4_by_2(void *pv)
+{
+ uint32_t a = load_atomic2(pv);
+ uint32_t b = load_atomic2(pv + 2);
+
+ if (HOST_BIG_ENDIAN) {
+ return (a << 16) | b;
+ } else {
+ return (b << 16) | a;
+ }
+}
+
+/**
+ * load_atom_8_by_2:
+ * @pv: host address
+ *
+ * Load 8 bytes from @pv, with four 2-byte atomic loads.
+ */
+static inline uint64_t load_atom_8_by_2(void *pv)
+{
+ uint32_t a = load_atom_4_by_2(pv);
+ uint32_t b = load_atom_4_by_2(pv + 4);
+
+ if (HOST_BIG_ENDIAN) {
+ return ((uint64_t)a << 32) | b;
+ } else {
+ return ((uint64_t)b << 32) | a;
+ }
+}
+
+/**
+ * load_atom_8_by_4:
+ * @pv: host address
+ *
+ * Load 8 bytes from @pv, with two 4-byte atomic loads.
+ */
+static inline uint64_t load_atom_8_by_4(void *pv)
+{
+ uint32_t a = load_atomic4(pv);
+ uint32_t b = load_atomic4(pv + 4);
+
+ if (HOST_BIG_ENDIAN) {
+ return ((uint64_t)a << 32) | b;
+ } else {
+ return ((uint64_t)b << 32) | a;
+ }
+}
+
+/**
+ * load_atom_8_by_8_or_4:
+ * @pv: host address
+ *
+ * Load 8 bytes from aligned @pv, with at least 4-byte atomicity.
+ */
+static inline uint64_t load_atom_8_by_8_or_4(void *pv)
+{
+ if (HAVE_al8_fast) {
+ return load_atomic8(pv);
+ } else {
+ return load_atom_8_by_4(pv);
+ }
+}
+
+/**
+ * load_atom_2:
+ * @p: host address
+ * @memop: the full memory op
+ *
+ * Load 2 bytes from @p, honoring the atomicity of @memop.
+ */
+static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
+ void *pv, MemOp memop)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int atmax;
+
+ if (likely((pi & 1) == 0)) {
+ return load_atomic2(pv);
+ }
+ if (HAVE_al16_fast) {
+ return load_atom_extract_al16_or_al8(pv, 2);
+ }
+
+ atmax = required_atomicity(env, pi, memop);
+ switch (atmax) {
+ case MO_8:
+ return lduw_he_p(pv);
+ case MO_16:
+ /* The only case remaining is MO_ATOM_WITHIN16. */
+ if (!HAVE_al8_fast && (pi & 3) == 1) {
+ /* Big or little endian, we want the middle two bytes. */
+ return load_atomic4(pv - 1) >> 8;
+ }
+ if ((pi & 15) != 7) {
+ return load_atom_extract_al8_or_exit(env, ra, pv, 2);
+ }
+ return load_atom_extract_al16_or_exit(env, ra, pv, 2);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/**
+ * load_atom_4:
+ * @p: host address
+ * @memop: the full memory op
+ *
+ * Load 4 bytes from @p, honoring the atomicity of @memop.
+ */
+static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
+ void *pv, MemOp memop)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int atmax;
+
+ if (likely((pi & 3) == 0)) {
+ return load_atomic4(pv);
+ }
+ if (HAVE_al16_fast) {
+ return load_atom_extract_al16_or_al8(pv, 4);
+ }
+
+ atmax = required_atomicity(env, pi, memop);
+ switch (atmax) {
+ case MO_8:
+ case MO_16:
+ case -MO_16:
+ /*
+ * For MO_ATOM_IFALIGN, this is more atomicity than required,
+ * but it's trivially supported on all hosts, better than 4
+ * individual byte loads (when the host requires alignment),
+ * and overlaps with the MO_ATOM_SUBALIGN case of p % 2 == 0.
+ */
+ return load_atom_extract_al4x2(pv);
+ case MO_32:
+ if (!(pi & 4)) {
+ return load_atom_extract_al8_or_exit(env, ra, pv, 4);
+ }
+ return load_atom_extract_al16_or_exit(env, ra, pv, 4);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/**
+ * load_atom_8:
+ * @p: host address
+ * @memop: the full memory op
+ *
+ * Load 8 bytes from @p, honoring the atomicity of @memop.
+ */
+static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
+ void *pv, MemOp memop)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int atmax;
+
+ /*
+ * If the host does not support 8-byte atomics, wait until we have
+ * examined the atomicity parameters below.
+ */
+ if (HAVE_al8 && likely((pi & 7) == 0)) {
+ return load_atomic8(pv);
+ }
+ if (HAVE_al16_fast) {
+ return load_atom_extract_al16_or_al8(pv, 8);
+ }
+
+ atmax = required_atomicity(env, pi, memop);
+ if (atmax == MO_64) {
+ if (!HAVE_al8 && (pi & 7) == 0) {
+ load_atomic8_or_exit(env, ra, pv);
+ }
+ return load_atom_extract_al16_or_exit(env, ra, pv, 8);
+ }
+ if (HAVE_al8_fast) {
+ return load_atom_extract_al8x2(pv);
+ }
+ switch (atmax) {
+ case MO_8:
+ return ldq_he_p(pv);
+ case MO_16:
+ return load_atom_8_by_2(pv);
+ case MO_32:
+ return load_atom_8_by_4(pv);
+ case -MO_32:
+ if (HAVE_al8) {
+ return load_atom_extract_al8x2(pv);
+ }
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/**
+ * load_atom_16:
+ * @p: host address
+ * @memop: the full memory op
+ *
+ * Load 16 bytes from @p, honoring the atomicity of @memop.
+ */
+static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
+ void *pv, MemOp memop)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int atmax;
+ Int128 r;
+ uint64_t a, b;
+
+ /*
+ * If the host does not support 16-byte atomics, wait until we have
+ * examined the atomicity parameters below.
+ */
+ if (HAVE_al16_fast && likely((pi & 15) == 0)) {
+ return load_atomic16(pv);
+ }
+
+ atmax = required_atomicity(env, pi, memop);
+ switch (atmax) {
+ case MO_8:
+ memcpy(&r, pv, 16);
+ return r;
+ case MO_16:
+ a = load_atom_8_by_2(pv);
+ b = load_atom_8_by_2(pv + 8);
+ break;
+ case MO_32:
+ a = load_atom_8_by_4(pv);
+ b = load_atom_8_by_4(pv + 8);
+ break;
+ case MO_64:
+ if (!HAVE_al8) {
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ }
+ a = load_atomic8(pv);
+ b = load_atomic8(pv + 8);
+ break;
+ case -MO_64:
+ if (!HAVE_al8) {
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ }
+ a = load_atom_extract_al8x2(pv);
+ b = load_atom_extract_al8x2(pv + 8);
+ break;
+ case MO_128:
+ return load_atomic16_or_exit(env, ra, pv);
+ default:
+ g_assert_not_reached();
+ }
+ return int128_make128(HOST_BIG_ENDIAN ? b : a, HOST_BIG_ENDIAN ? a : b);
+}
+
+/**
+ * store_atomic2:
+ * @pv: host address
+ * @val: value to store
+ *
+ * Atomically store 2 aligned bytes to @pv.
+ */
+static inline void store_atomic2(void *pv, uint16_t val)
+{
+ uint16_t *p = __builtin_assume_aligned(pv, 2);
+ qatomic_set(p, val);
+}
+
+/**
+ * store_atomic4:
+ * @pv: host address
+ * @val: value to store
+ *
+ * Atomically store 4 aligned bytes to @pv.
+ */
+static inline void store_atomic4(void *pv, uint32_t val)
+{
+ uint32_t *p = __builtin_assume_aligned(pv, 4);
+ qatomic_set(p, val);
+}
+
+/**
+ * store_atomic8:
+ * @pv: host address
+ * @val: value to store
+ *
+ * Atomically store 8 aligned bytes to @pv.
+ */
+static inline void store_atomic8(void *pv, uint64_t val)
+{
+ uint64_t *p = __builtin_assume_aligned(pv, 8);
+
+ qemu_build_assert(HAVE_al8);
+ qatomic_set__nocheck(p, val);
+}
+
+/**
+ * store_atomic16:
+ * @pv: host address
+ * @val: value to store
+ *
+ * Atomically store 16 aligned bytes to @pv.
+ */
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atomic16(void *pv, Int128Alias val)
+{
+#if defined(CONFIG_ATOMIC128)
+ __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+ qatomic_set__nocheck(pu, val.u);
+#elif defined(CONFIG_CMPXCHG128)
+ __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+ __uint128_t o;
+
+ /*
+ * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
+ * defer to libatomic, so we must use __sync_*_compare_and_swap_16
+ * and accept the sequential consistency that comes with it.
+ */
+ do {
+ o = *pu;
+ } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+/**
+ * store_atom_4_by_2
+ */
+static inline void store_atom_4_by_2(void *pv, uint32_t val)
+{
+ store_atomic2(pv, val >> (HOST_BIG_ENDIAN ? 16 : 0));
+ store_atomic2(pv + 2, val >> (HOST_BIG_ENDIAN ? 0 : 16));
+}
+
+/**
+ * store_atom_8_by_2
+ */
+static inline void store_atom_8_by_2(void *pv, uint64_t val)
+{
+ store_atom_4_by_2(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0));
+ store_atom_4_by_2(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32));
+}
+
+/**
+ * store_atom_8_by_4
+ */
+static inline void store_atom_8_by_4(void *pv, uint64_t val)
+{
+ store_atomic4(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0));
+ store_atomic4(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32));
+}
+
+/**
+ * store_atom_insert_al4:
+ * @p: host address
+ * @val: shifted value to store
+ * @msk: mask for value to store
+ *
+ * Atomically store @val to @p, masked by @msk.
+ */
+static void store_atom_insert_al4(uint32_t *p, uint32_t val, uint32_t msk)
+{
+ uint32_t old, new;
+
+ p = __builtin_assume_aligned(p, 4);
+ old = qatomic_read(p);
+ do {
+ new = (old & ~msk) | val;
+ } while (!__atomic_compare_exchange_n(p, &old, new, true,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+}
+
+/**
+ * store_atom_insert_al8:
+ * @p: host address
+ * @val: shifted value to store
+ * @msk: mask for value to store
+ *
+ * Atomically store @val to @p masked by @msk.
+ */
+static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
+{
+ uint64_t old, new;
+
+ qemu_build_assert(HAVE_al8);
+ p = __builtin_assume_aligned(p, 8);
+ old = qatomic_read__nocheck(p);
+ do {
+ new = (old & ~msk) | val;
+ } while (!__atomic_compare_exchange_n(p, &old, new, true,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+}
+
+/**
+ * store_atom_insert_al16:
+ * @ps: host address
+ * @val: shifted value to store
+ * @msk: mask for value to store
+ *
+ * Atomically store @val to @ps, masked by @msk.
+ */
+static void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
+{
+#if defined(CONFIG_ATOMIC128)
+ __uint128_t *pu, old, new;
+
+ /* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
+ pu = __builtin_assume_aligned(ps, 16);
+ old = *pu;
+ do {
+ new = (old & ~msk.u) | val.u;
+ } while (!__atomic_compare_exchange_n(pu, &old, new, true,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+#elif defined(CONFIG_CMPXCHG128)
+ __uint128_t *pu, old, new;
+
+ /*
+ * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
+ * defer to libatomic, so we must use __sync_*_compare_and_swap_16
+ * and accept the sequential consistency that comes with it.
+ */
+ pu = __builtin_assume_aligned(ps, 16);
+ do {
+ old = *pu;
+ new = (old & ~msk.u) | val.u;
+ } while (!__sync_bool_compare_and_swap_16(pu, old, new));
+#else
+ qemu_build_not_reached();
+#endif
+}
+
+/**
+ * store_bytes_leN:
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * Store @size bytes at @pv.  The bytes to store are extracted in
+ * little-endian order from @val_le; return the bytes of @val_le
+ * beyond @size that have not been stored.
+ */
+static uint64_t store_bytes_leN(void *pv, int size, uint64_t val_le)
+{
+ uint8_t *p = pv;
+ for (int i = 0; i < size; i++, val_le >>= 8) {
+ p[i] = val_le;
+ }
+ return val_le;
+}
+
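
Aside (illustrative sketch, not part of this commit): the return-the-remainder
convention is what lets the store_atom_* routines below chain a "whole" atomic
store with a trailing byte-by-byte store.  A minimal sketch of the chaining,
restating store_bytes_leN locally so it compiles on its own:

    #include <stdint.h>

    /* Same contract as store_bytes_leN: store the low bytes, return the rest. */
    static uint64_t demo_store_bytes_leN(void *pv, int size, uint64_t val_le)
    {
        uint8_t *p = pv;

        for (int i = 0; i < size; i++, val_le >>= 8) {
            p[i] = val_le;
        }
        return val_le;
    }

    /* Write a 6-byte little-endian quantity as 4 bytes plus 2 leftover bytes. */
    static void demo_store_6(void *pv, uint64_t val_le)
    {
        uint64_t rest = demo_store_bytes_leN(pv, 4, val_le);  /* bytes 0..3 */

        demo_store_bytes_leN((uint8_t *)pv + 4, 2, rest);     /* bytes 4..5 */
    }
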
+/**
+ * store_parts_leN:
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * As store_bytes_leN, but atomically on each aligned part.
+ */
+G_GNUC_UNUSED
+static uint64_t store_parts_leN(void *pv, int size, uint64_t val_le)
+{
+ do {
+ int n;
+
+ /* Find minimum of alignment and size */
+ switch (((uintptr_t)pv | size) & 7) {
+ case 4:
+ store_atomic4(pv, le32_to_cpu(val_le));
+ val_le >>= 32;
+ n = 4;
+ break;
+ case 2:
+ case 6:
+ store_atomic2(pv, le16_to_cpu(val_le));
+ val_le >>= 16;
+ n = 2;
+ break;
+ default:
+ *(uint8_t *)pv = val_le;
+ val_le >>= 8;
+ n = 1;
+ break;
+ case 0:
+ g_assert_not_reached();
+ }
+ pv += n;
+ size -= n;
+ } while (size != 0);
+
+ return val_le;
+}
+
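
Aside (illustrative, not part of this commit): a worked pass through the loop
above for a 7-byte store starting one byte into an 8-byte-aligned block shows
how each iteration picks a naturally aligned piece:

    pv % 8 = 1, size = 7  ->  (1 | 7) & 7 = 7  ->  default: 1 byte
    pv % 8 = 2, size = 6  ->  (2 | 6) & 7 = 6  ->  case 6:  2 bytes
    pv % 8 = 4, size = 4  ->  (4 | 4) & 7 = 4  ->  case 4:  4 bytes

Every piece is stored with its natural atomicity and the loop always makes
progress.  Case 0 would require an 8-byte store to an 8-aligned address,
which callers are expected to handle with a single atomic 8-byte store
instead, hence the assertion.
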
+/**
+ * store_whole_le4:
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * As store_bytes_leN, but atomically as a whole.
+ * Four aligned bytes are guaranteed to cover the store.
+ */
+static uint64_t store_whole_le4(void *pv, int size, uint64_t val_le)
+{
+ int sz = size * 8;
+ int o = (uintptr_t)pv & 3;
+ int sh = o * 8;
+ uint32_t m = MAKE_64BIT_MASK(0, sz);
+ uint32_t v;
+
+ if (HOST_BIG_ENDIAN) {
+ v = bswap32(val_le) >> sh;
+ m = bswap32(m) >> sh;
+ } else {
+ v = val_le << sh;
+ m <<= sh;
+ }
+ store_atom_insert_al4(pv - o, v, m);
+ return val_le >> sz;
+}
+
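
Aside (illustrative, not part of this commit): as a worked example of the
shift/mask arithmetic above, storing size = 2 bytes at an address with
o = pv & 3 = 1 on a little-endian host gives sz = 16 and sh = 8, so
v = val_le << 8 and m = 0xffff << 8 = 0x00ffff00; store_atom_insert_al4()
then replaces exactly bytes 1..2 of the containing aligned 32-bit word, and
the function returns val_le >> 16, i.e. the bytes still to be stored
elsewhere.  On a big-endian host the value and mask are byte-swapped first so
that the same memory bytes are covered.
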
+/**
+ * store_whole_le8:
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * As store_bytes_leN, but atomically as a whole.
+ * Eight aligned bytes are guaranteed to cover the store.
+ */
+static uint64_t store_whole_le8(void *pv, int size, uint64_t val_le)
+{
+ int sz = size * 8;
+ int o = (uintptr_t)pv & 7;
+ int sh = o * 8;
+ uint64_t m = MAKE_64BIT_MASK(0, sz);
+ uint64_t v;
+
+ qemu_build_assert(HAVE_al8);
+ if (HOST_BIG_ENDIAN) {
+ v = bswap64(val_le) >> sh;
+ m = bswap64(m) >> sh;
+ } else {
+ v = val_le << sh;
+ m <<= sh;
+ }
+ store_atom_insert_al8(pv - o, v, m);
+ return val_le >> sz;
+}
+
+/**
+ * store_whole_le16:
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * As store_bytes_leN, but atomically as a whole.
+ * Sixteen aligned bytes are guaranteed to cover the store.
+ */
+static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
+{
+ int sz = size * 8;
+ int o = (uintptr_t)pv & 15;
+ int sh = o * 8;
+ Int128 m, v;
+
+ qemu_build_assert(HAVE_al16);
+
+ /* Like MAKE_64BIT_MASK(0, sz), but larger. */
+ if (sz <= 64) {
+ m = int128_make64(MAKE_64BIT_MASK(0, sz));
+ } else {
+ m = int128_make128(-1, MAKE_64BIT_MASK(0, sz - 64));
+ }
+
+ if (HOST_BIG_ENDIAN) {
+ v = int128_urshift(bswap128(val_le), sh);
+ m = int128_urshift(bswap128(m), sh);
+ } else {
+ v = int128_lshift(val_le, sh);
+ m = int128_lshift(m, sh);
+ }
+ store_atom_insert_al16(pv - o, v, m);
+
+    /* The return value is unused if sz <= 64; avoid an out-of-range shift. */
+    return sz > 64 ? int128_gethi(val_le) >> (sz - 64) : 0;
+}
+
+/**
+ * store_atom_2:
+ * @pv: host address
+ * @val: the value to store
+ * @memop: the full memory op
+ *
+ * Store 2 bytes to @pv, honoring the atomicity of @memop.
+ */
+static void store_atom_2(CPUArchState *env, uintptr_t ra,
+ void *pv, MemOp memop, uint16_t val)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int atmax;
+
+ if (likely((pi & 1) == 0)) {
+ store_atomic2(pv, val);
+ return;
+ }
+
+ atmax = required_atomicity(env, pi, memop);
+ if (atmax == MO_8) {
+ stw_he_p(pv, val);
+ return;
+ }
+
+ /*
+ * The only case remaining is MO_ATOM_WITHIN16.
+ * Big or little endian, we want the middle two bytes in each test.
+ */
+ if ((pi & 3) == 1) {
+ store_atom_insert_al4(pv - 1, (uint32_t)val << 8, MAKE_64BIT_MASK(8, 16));
+ return;
+ } else if ((pi & 7) == 3) {
+ if (HAVE_al8) {
+ store_atom_insert_al8(pv - 3, (uint64_t)val << 24, MAKE_64BIT_MASK(24, 16));
+ return;
+ }
+ } else if ((pi & 15) == 7) {
+ if (HAVE_al16) {
+ Int128 v = int128_lshift(int128_make64(val), 56);
+ Int128 m = int128_lshift(int128_make64(0xffff), 56);
+ store_atom_insert_al16(pv - 7, v, m);
+ return;
+ }
+ } else {
+ g_assert_not_reached();
+ }
+
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+}
+
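
Aside (illustrative, not part of this commit): under MO_ATOM_WITHIN16 a
misaligned 2-byte store has an odd offset o = pi mod 16 other than 15 (which
would cross the 16-byte block), so the three tests above cover every
remaining case:

    o in {1, 5, 9, 13}  ->  bytes 1..2 of an aligned 4-byte word   (al4 insert)
    o in {3, 11}        ->  bytes 3..4 of an aligned 8-byte word   (al8 insert, if available)
    o = 7               ->  bytes 7..8 of an aligned 16-byte block (al16 insert, if available)

When the required 8- or 16-byte compare-and-swap is not available, the store
falls back to cpu_loop_exit_atomic().
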
+/**
+ * store_atom_4:
+ * @pv: host address
+ * @val: the value to store
+ * @memop: the full memory op
+ *
+ * Store 4 bytes to @pv, honoring the atomicity of @memop.
+ */
+static void store_atom_4(CPUArchState *env, uintptr_t ra,
+ void *pv, MemOp memop, uint32_t val)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int atmax;
+
+ if (likely((pi & 3) == 0)) {
+ store_atomic4(pv, val);
+ return;
+ }
+
+ atmax = required_atomicity(env, pi, memop);
+ switch (atmax) {
+ case MO_8:
+ stl_he_p(pv, val);
+ return;
+ case MO_16:
+ store_atom_4_by_2(pv, val);
+ return;
+ case -MO_16:
+ {
+ uint32_t val_le = cpu_to_le32(val);
+ int s2 = pi & 3;
+ int s1 = 4 - s2;
+
+ switch (s2) {
+ case 1:
+ val_le = store_whole_le4(pv, s1, val_le);
+ *(uint8_t *)(pv + 3) = val_le;
+ break;
+ case 3:
+ *(uint8_t *)pv = val_le;
+ store_whole_le4(pv + 1, s2, val_le >> 8);
+ break;
+ case 0: /* aligned */
+ case 2: /* atmax MO_16 */
+ default:
+ g_assert_not_reached();
+ }
+ }
+ return;
+ case MO_32:
+ if ((pi & 7) < 4) {
+ if (HAVE_al8) {
+ store_whole_le8(pv, 4, cpu_to_le32(val));
+ return;
+ }
+ } else {
+ if (HAVE_al16) {
+ store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val)));
+ return;
+ }
+ }
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/**
+ * store_atom_8:
+ * @pv: host address
+ * @val: the value to store
+ * @memop: the full memory op
+ *
+ * Store 8 bytes to @pv, honoring the atomicity of @memop.
+ */
+static void store_atom_8(CPUArchState *env, uintptr_t ra,
+ void *pv, MemOp memop, uint64_t val)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ int atmax;
+
+ if (HAVE_al8 && likely((pi & 7) == 0)) {
+ store_atomic8(pv, val);
+ return;
+ }
+
+ atmax = required_atomicity(env, pi, memop);
+ switch (atmax) {
+ case MO_8:
+ stq_he_p(pv, val);
+ return;
+ case MO_16:
+ store_atom_8_by_2(pv, val);
+ return;
+ case MO_32:
+ store_atom_8_by_4(pv, val);
+ return;
+ case -MO_32:
+ if (HAVE_al8) {
+ uint64_t val_le = cpu_to_le64(val);
+ int s2 = pi & 7;
+ int s1 = 8 - s2;
+
+ switch (s2) {
+ case 1 ... 3:
+ val_le = store_whole_le8(pv, s1, val_le);
+ store_bytes_leN(pv + s1, s2, val_le);
+ break;
+ case 5 ... 7:
+ val_le = store_bytes_leN(pv, s1, val_le);
+ store_whole_le8(pv + s1, s2, val_le);
+ break;
+ case 0: /* aligned */
+ case 4: /* atmax MO_32 */
+ default:
+ g_assert_not_reached();
+ }
+ return;
+ }
+ break;
+ case MO_64:
+ if (HAVE_al16) {
+ store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val)));
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+}
+
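
Aside (illustrative, not part of this commit): a worked trace of the -MO_32
path above for pi & 7 == 3: s2 = 3 and s1 = 5, so store_whole_le8(pv, 5,
val_le) atomically merges the five bytes at offsets 3..7 of the containing
8-byte-aligned word (i.e. the first five bytes of the store) and returns
val_le >> 40, after which store_bytes_leN(pv + 5, 3, ...) writes the
remaining three bytes individually into the following word.
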
+/**
+ * store_atom_16:
+ * @pv: host address
+ * @val: the value to store
+ * @memop: the full memory op
+ *
+ * Store 16 bytes to @pv, honoring the atomicity of @memop.
+ */
+static void store_atom_16(CPUArchState *env, uintptr_t ra,
+ void *pv, MemOp memop, Int128 val)
+{
+ uintptr_t pi = (uintptr_t)pv;
+ uint64_t a, b;
+ int atmax;
+
+ if (HAVE_al16_fast && likely((pi & 15) == 0)) {
+ store_atomic16(pv, val);
+ return;
+ }
+
+ atmax = required_atomicity(env, pi, memop);
+
+ a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
+ b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);
+ switch (atmax) {
+ case MO_8:
+ memcpy(pv, &val, 16);
+ return;
+ case MO_16:
+ store_atom_8_by_2(pv, a);
+ store_atom_8_by_2(pv + 8, b);
+ return;
+ case MO_32:
+ store_atom_8_by_4(pv, a);
+ store_atom_8_by_4(pv + 8, b);
+ return;
+ case MO_64:
+ if (HAVE_al8) {
+ store_atomic8(pv, a);
+ store_atomic8(pv + 8, b);
+ return;
+ }
+ break;
+ case -MO_64:
+ if (HAVE_al16) {
+ uint64_t val_le;
+ int s2 = pi & 15;
+ int s1 = 16 - s2;
+
+ if (HOST_BIG_ENDIAN) {
+ val = bswap128(val);
+ }
+ switch (s2) {
+ case 1 ... 7:
+ val_le = store_whole_le16(pv, s1, val);
+ store_bytes_leN(pv + s1, s2, val_le);
+ break;
+ case 9 ... 15:
+ store_bytes_leN(pv, s1, int128_getlo(val));
+ val = int128_urshift(val, s1 * 8);
+ store_whole_le16(pv + s1, s2, val);
+ break;
+ case 0: /* aligned */
+ case 8: /* atmax MO_64 */
+ default:
+ g_assert_not_reached();
+ }
+ return;
+ }
+ break;
+ case MO_128:
+ if (HAVE_al16) {
+ store_atomic16(pv, val);
+ return;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ cpu_loop_exit_atomic(env_cpu(env), ra);
+}
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index 5efb8db..34be1b9 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -92,27 +92,6 @@ void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index,
void *userdata)
{ }
-static void do_gen_mem_cb(TCGv vaddr, uint32_t info)
-{
- TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
- TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
- TCGv_i64 vaddr64 = tcg_temp_ebb_new_i64();
- TCGv_ptr udata = tcg_temp_ebb_new_ptr();
-
- tcg_gen_movi_i32(meminfo, info);
- tcg_gen_movi_ptr(udata, 0);
- tcg_gen_ld_i32(cpu_index, cpu_env,
- -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
- tcg_gen_extu_tl_i64(vaddr64, vaddr);
-
- gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata);
-
- tcg_temp_free_ptr(udata);
- tcg_temp_free_i64(vaddr64);
- tcg_temp_free_i32(meminfo);
- tcg_temp_free_i32(cpu_index);
-}
-
static void gen_empty_udata_cb(void)
{
TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
@@ -145,9 +124,22 @@ static void gen_empty_inline_cb(void)
tcg_temp_free_i64(val);
}
-static void gen_empty_mem_cb(TCGv addr, uint32_t info)
+static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)
{
- do_gen_mem_cb(addr, info);
+ TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
+ TCGv_i32 meminfo = tcg_temp_ebb_new_i32();
+ TCGv_ptr udata = tcg_temp_ebb_new_ptr();
+
+ tcg_gen_movi_i32(meminfo, info);
+ tcg_gen_movi_ptr(udata, 0);
+ tcg_gen_ld_i32(cpu_index, cpu_env,
+ -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
+
+ gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
+
+ tcg_temp_free_ptr(udata);
+ tcg_temp_free_i32(meminfo);
+ tcg_temp_free_i32(cpu_index);
}
/*
@@ -202,35 +194,17 @@ static void plugin_gen_empty_callback(enum plugin_gen_from from)
}
}
-union mem_gen_fn {
- void (*mem_fn)(TCGv, uint32_t);
- void (*inline_fn)(void);
-};
-
-static void gen_mem_wrapped(enum plugin_gen_cb type,
- const union mem_gen_fn *f, TCGv addr,
- uint32_t info, bool is_mem)
+void plugin_gen_empty_mem_callback(TCGv_i64 addr, uint32_t info)
{
enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info);
- gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw);
- if (is_mem) {
- f->mem_fn(addr, info);
- } else {
- f->inline_fn();
- }
+ gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_MEM, rw);
+ gen_empty_mem_cb(addr, info);
tcg_gen_plugin_cb_end();
-}
-void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info)
-{
- union mem_gen_fn fn;
-
- fn.mem_fn = gen_empty_mem_cb;
- gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true);
-
- fn.inline_fn = gen_empty_inline_cb;
- gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false);
+ gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, PLUGIN_GEN_CB_INLINE, rw);
+ gen_empty_inline_cb();
+ tcg_gen_plugin_cb_end();
}
static TCGOp *find_op(TCGOp *op, TCGOpcode opc)
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index b8e6421..6f8c206 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -39,62 +39,65 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
#endif /* IN_HELPER_PROTO */
+DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32)
+DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32)
+
DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
- i32, env, tl, i32, i32, i32)
+ i32, env, i64, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
- i32, env, tl, i32, i32, i32)
+ i32, env, i64, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
- i32, env, tl, i32, i32, i32)
+ i32, env, i64, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
- i32, env, tl, i32, i32, i32)
+ i32, env, i64, i32, i32, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
- i32, env, tl, i32, i32, i32)
+ i32, env, i64, i32, i32, i32)
#ifdef CONFIG_ATOMIC64
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
- i64, env, tl, i64, i64, i32)
+ i64, env, i64, i64, i64, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
- i64, env, tl, i64, i64, i32)
+ i64, env, i64, i64, i64, i32)
#endif
#ifdef CONFIG_CMPXCHG128
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
- i128, env, tl, i128, i128, i32)
+ i128, env, i64, i128, i128, i32)
DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
- i128, env, tl, i128, i128, i32)
+ i128, env, i64, i128, i128, i32)
#endif
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG,
- i128, env, tl, i128, i128, i32)
+ i128, env, i64, i128, i128, i32)
DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG,
- i128, env, tl, i128, i128, i32)
+ i128, env, i64, i128, i128, i32)
#ifdef CONFIG_ATOMIC64
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le), \
- TCG_CALL_NO_WG, i64, env, tl, i64, i32) \
+ TCG_CALL_NO_WG, i64, env, i64, i64, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be), \
- TCG_CALL_NO_WG, i64, env, tl, i64, i32)
+ TCG_CALL_NO_WG, i64, env, i64, i64, i32)
#else
#define GEN_ATOMIC_HELPERS(NAME) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32) \
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32) \
DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be), \
- TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+ TCG_CALL_NO_WG, i32, env, i64, i32, i32)
#endif /* CONFIG_ATOMIC64 */
GEN_ATOMIC_HELPERS(fetch_add)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 5b13281..353849c 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -72,9 +72,11 @@ QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
TBContext tb_ctx;
-/* Encode VAL as a signed leb128 sequence at P.
- Return P incremented past the encoded value. */
-static uint8_t *encode_sleb128(uint8_t *p, target_long val)
+/*
+ * Encode VAL as a signed leb128 sequence at P.
+ * Return P incremented past the encoded value.
+ */
+static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
{
int more, byte;
@@ -92,21 +94,23 @@ static uint8_t *encode_sleb128(uint8_t *p, target_long val)
return p;
}
-/* Decode a signed leb128 sequence at *PP; increment *PP past the
- decoded value. Return the decoded value. */
-static target_long decode_sleb128(const uint8_t **pp)
+/*
+ * Decode a signed leb128 sequence at *PP; increment *PP past the
+ * decoded value. Return the decoded value.
+ */
+static int64_t decode_sleb128(const uint8_t **pp)
{
const uint8_t *p = *pp;
- target_long val = 0;
+ int64_t val = 0;
int byte, shift = 0;
do {
byte = *p++;
- val |= (target_ulong)(byte & 0x7f) << shift;
+ val |= (int64_t)(byte & 0x7f) << shift;
shift += 7;
} while (byte & 0x80);
if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
- val |= -(target_ulong)1 << shift;
+ val |= -(int64_t)1 << shift;
}
*pp = p;
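
Aside (illustrative, not part of this commit): a quick round trip through
these routines with the new 64-bit types: the value -2 encodes as the single
byte 0x7e (after emitting the low seven bits the remaining value is -1 and
bit 6 of the byte is set, so no continuation byte follows); decoding reads
0x7e, accumulates val = 0x7e with shift = 7, then sees bit 6 set and
sign-extends with val |= -(int64_t)1 << 7, recovering -2.
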
@@ -132,7 +136,7 @@ static int encode_search(TranslationBlock *tb, uint8_t *block)
int i, j, n;
for (i = 0, n = tb->icount; i < n; ++i) {
- target_ulong prev;
+ uint64_t prev;
for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
if (i == 0) {
@@ -352,6 +356,13 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb_set_page_addr0(tb, phys_pc);
tb_set_page_addr1(tb, -1);
tcg_ctx->gen_tb = tb;
+ tcg_ctx->addr_type = TCG_TYPE_TL;
+#ifdef CONFIG_SOFTMMU
+ tcg_ctx->page_bits = TARGET_PAGE_BITS;
+ tcg_ctx->page_mask = TARGET_PAGE_MASK;
+ tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
+#endif
+
tb_overflow:
#ifdef CONFIG_PROFILER
@@ -444,7 +455,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
/* Dump header and the first instruction */
fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
fprintf(logfile,
- " -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
+ " -- guest addr 0x%016" PRIx64 " + tb prologue\n",
tcg_ctx->gen_insn_data[insn][0]);
chunk_start = tcg_ctx->gen_insn_end_off[insn];
disas(logfile, tb->tc.ptr, chunk_start);
@@ -457,7 +468,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
while (insn < tb->icount) {
size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
if (chunk_end > chunk_start) {
- fprintf(logfile, " -- guest addr 0x" TARGET_FMT_lx "\n",
+ fprintf(logfile, " -- guest addr 0x%016" PRIx64 "\n",
tcg_ctx->gen_insn_data[insn][0]);
disas(logfile, tb->tc.ptr + chunk_start,
chunk_end - chunk_start);
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index fc597a0..36ad828 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -889,35 +889,9 @@ void page_reset_target_data(target_ulong start, target_ulong last) { }
/* The softmmu versions of these helpers are in cputlb.c. */
-/*
- * Verify that we have passed the correct MemOp to the correct function.
- *
- * We could present one function to target code, and dispatch based on
- * the MemOp, but so far we have worked hard to avoid an indirect function
- * call along the memory path.
- */
-static void validate_memop(MemOpIdx oi, MemOp expected)
-{
-#ifdef CONFIG_DEBUG_TCG
- MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
- assert(have == expected);
-#endif
-}
-
-void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
-{
- cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
-}
-
-void helper_unaligned_st(CPUArchState *env, target_ulong addr)
-{
- cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
-}
-
-static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t ra, MMUAccessType type)
+static void *cpu_mmu_lookup(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra, MMUAccessType type)
{
- MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
void *ret;
@@ -931,116 +905,251 @@ static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
return ret;
}
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+#include "ldst_atomicity.c.inc"
+
+static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
uint8_t ret;
- validate_memop(oi, MO_UB);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+ tcg_debug_assert((mop & MO_SIZE) == MO_8);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = ldub_p(haddr);
clear_helper_retaddr();
+ return ret;
+}
+
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ return do_ld1_mmu(env, addr, get_memop(oi), ra);
+}
+
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
+}
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
-uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+static uint16_t do_ld2_he_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
uint16_t ret;
- validate_memop(oi, MO_BEUW);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
- ret = lduw_be_p(haddr);
+ tcg_debug_assert((mop & MO_SIZE) == MO_16);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
+ ret = load_atom_2(env, ra, haddr, mop);
clear_helper_retaddr();
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
-uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+ uint16_t ret = do_ld2_he_mmu(env, addr, mop, ra);
+
+ if (mop & MO_BSWAP) {
+ ret = bswap16(ret);
+ }
+ return ret;
+}
+
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+ int16_t ret = do_ld2_he_mmu(env, addr, mop, ra);
+
+ if (mop & MO_BSWAP) {
+ ret = bswap16(ret);
+ }
+ return ret;
+}
+
+uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
+ MemOp mop = get_memop(oi);
+ uint16_t ret;
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
+ ret = do_ld2_he_mmu(env, addr, mop, ra);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+ return cpu_to_be16(ret);
+}
+
+uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+ uint16_t ret;
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
+ ret = do_ld2_he_mmu(env, addr, mop, ra);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+ return cpu_to_le16(ret);
+}
+
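
Aside (illustrative, not part of this commit): the split into a host-endian
do_ld2_he_mmu() plus an endian conversion in the wrapper is easy to verify
with concrete bytes.  If guest memory holds 0x12 0x34 as a big-endian 16-bit
value, do_ld2_he_mmu() returns 0x3412 on a little-endian host and 0x1234 on a
big-endian one; cpu_to_be16() (a byte swap on the former, a no-op on the
latter) makes cpu_ldw_be_mmu() return 0x1234 in both cases, and
cpu_ldw_le_mmu() works the same way with cpu_to_le16().
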
+static uint32_t do_ld4_he_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
+{
void *haddr;
uint32_t ret;
- validate_memop(oi, MO_BEUL);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
- ret = ldl_be_p(haddr);
+ tcg_debug_assert((mop & MO_SIZE) == MO_32);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
+ ret = load_atom_4(env, ra, haddr, mop);
clear_helper_retaddr();
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
-uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+ uint32_t ret = do_ld4_he_mmu(env, addr, mop, ra);
+
+ if (mop & MO_BSWAP) {
+ ret = bswap32(ret);
+ }
+ return ret;
+}
+
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+ int32_t ret = do_ld4_he_mmu(env, addr, mop, ra);
+
+ if (mop & MO_BSWAP) {
+ ret = bswap32(ret);
+ }
+ return ret;
+}
+
+uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
+ MemOp mop = get_memop(oi);
+ uint32_t ret;
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
+ ret = do_ld4_he_mmu(env, addr, mop, ra);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+ return cpu_to_be32(ret);
+}
+
+uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+ uint32_t ret;
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
+ ret = do_ld4_he_mmu(env, addr, mop, ra);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+ return cpu_to_le32(ret);
+}
+
+static uint64_t do_ld8_he_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
+{
void *haddr;
uint64_t ret;
- validate_memop(oi, MO_BEUQ);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
- ret = ldq_be_p(haddr);
+ tcg_debug_assert((mop & MO_SIZE) == MO_64);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
+ ret = load_atom_8(env, ra, haddr, mop);
clear_helper_retaddr();
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return ret;
}
-uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint16_t ret;
+ MemOp mop = get_memop(oi);
+ uint64_t ret = do_ld8_he_mmu(env, addr, mop, ra);
- validate_memop(oi, MO_LEUW);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
- ret = lduw_le_p(haddr);
- clear_helper_retaddr();
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+ if (mop & MO_BSWAP) {
+ ret = bswap64(ret);
+ }
return ret;
}
-uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
- uint32_t ret;
+ MemOp mop = get_memop(oi);
+ uint64_t ret;
- validate_memop(oi, MO_LEUL);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
- ret = ldl_le_p(haddr);
- clear_helper_retaddr();
+ tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
+ ret = do_ld8_he_mmu(env, addr, mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return ret;
+ return cpu_to_be64(ret);
}
uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
+ MemOp mop = get_memop(oi);
uint64_t ret;
- validate_memop(oi, MO_LEUQ);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
- ret = ldq_le_p(haddr);
- clear_helper_retaddr();
+ tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
+ ret = do_ld8_he_mmu(env, addr, mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+ return cpu_to_le64(ret);
+}
+
+static Int128 do_ld16_he_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
+{
+ void *haddr;
+ Int128 ret;
+
+ tcg_debug_assert((mop & MO_SIZE) == MO_128);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
+ ret = load_atom_16(env, ra, haddr, mop);
+ clear_helper_retaddr();
return ret;
}
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+ Int128 ret = do_ld16_he_mmu(env, addr, mop, ra);
+
+ if (mop & MO_BSWAP) {
+ ret = bswap128(ret);
+ }
+ return ret;
+}
+
+Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
+{
+ return helper_ld16_mmu(env, addr, oi, GETPC());
+}
+
Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
+ MemOp mop = get_memop(oi);
Int128 ret;
- validate_memop(oi, MO_128 | MO_BE);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
- memcpy(&ret, haddr, 16);
- clear_helper_retaddr();
+ tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
+ ret = do_ld16_he_mmu(env, addr, mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-
if (!HOST_BIG_ENDIAN) {
ret = bswap128(ret);
}
@@ -1050,132 +1159,218 @@ Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
+ MemOp mop = get_memop(oi);
Int128 ret;
- validate_memop(oi, MO_128 | MO_LE);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
- memcpy(&ret, haddr, 16);
- clear_helper_retaddr();
+ tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
+ ret = do_ld16_he_mmu(env, addr, mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-
if (HOST_BIG_ENDIAN) {
ret = bswap128(ret);
}
return ret;
}
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
- MemOpIdx oi, uintptr_t ra)
+static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
- validate_memop(oi, MO_UB);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ tcg_debug_assert((mop & MO_SIZE) == MO_8);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
stb_p(haddr, val);
clear_helper_retaddr();
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
-void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
+ do_st1_mmu(env, addr, val, get_memop(oi), ra);
+}
- validate_memop(oi, MO_BEUW);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
- stw_be_p(haddr, val);
- clear_helper_retaddr();
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ do_st1_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
-void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
+static void do_st2_he_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
- validate_memop(oi, MO_BEUL);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
- stl_be_p(haddr, val);
+ tcg_debug_assert((mop & MO_SIZE) == MO_16);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ store_atom_2(env, ra, haddr, mop, val);
clear_helper_retaddr();
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
-void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
+ MemOp mop = get_memop(oi);
- validate_memop(oi, MO_BEUQ);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
- stq_be_p(haddr, val);
- clear_helper_retaddr();
+ if (mop & MO_BSWAP) {
+ val = bswap16(val);
+ }
+ do_st2_he_mmu(env, addr, val, mop, ra);
+}
+
+void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
+ do_st2_he_mmu(env, addr, be16_to_cpu(val), mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
+ MemOp mop = get_memop(oi);
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
+ do_st2_he_mmu(env, addr, le16_to_cpu(val), mop, ra);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+static void do_st4_he_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+ MemOp mop, uintptr_t ra)
+{
void *haddr;
- validate_memop(oi, MO_LEUW);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
- stw_le_p(haddr, val);
+ tcg_debug_assert((mop & MO_SIZE) == MO_32);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ store_atom_4(env, ra, haddr, mop, val);
clear_helper_retaddr();
+}
+
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+
+ if (mop & MO_BSWAP) {
+ val = bswap32(val);
+ }
+ do_st4_he_mmu(env, addr, val, mop, ra);
+}
+
+void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
+ do_st4_he_mmu(env, addr, be32_to_cpu(val), mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
+ MemOp mop = get_memop(oi);
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
+ do_st4_he_mmu(env, addr, le32_to_cpu(val), mop, ra);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+static void do_st8_he_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+ MemOp mop, uintptr_t ra)
+{
void *haddr;
- validate_memop(oi, MO_LEUL);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
- stl_le_p(haddr, val);
+ tcg_debug_assert((mop & MO_SIZE) == MO_64);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ store_atom_8(env, ra, haddr, mop, val);
clear_helper_retaddr();
+}
+
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+
+ if (mop & MO_BSWAP) {
+ val = bswap64(val);
+ }
+ do_st8_he_mmu(env, addr, val, mop, ra);
+}
+
+void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
+ do_st8_he_mmu(env, addr, cpu_to_be64(val), mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
+ MemOp mop = get_memop(oi);
+
+ tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
+ do_st8_he_mmu(env, addr, cpu_to_le64(val), mop, ra);
+ qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+static void do_st16_he_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+ MemOp mop, uintptr_t ra)
+{
void *haddr;
- validate_memop(oi, MO_LEUQ);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
- stq_le_p(haddr, val);
+ tcg_debug_assert((mop & MO_SIZE) == MO_128);
+ haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+ store_atom_16(env, ra, haddr, mop, val);
clear_helper_retaddr();
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
+ MemOpIdx oi, uintptr_t ra)
+{
+ MemOp mop = get_memop(oi);
+
+ if (mop & MO_BSWAP) {
+ val = bswap128(val);
+ }
+ do_st16_he_mmu(env, addr, val, mop, ra);
+}
+
+void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
+{
+ helper_st16_mmu(env, addr, val, oi, GETPC());
}
void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
+ MemOp mop = get_memop(oi);
- validate_memop(oi, MO_128 | MO_BE);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
if (!HOST_BIG_ENDIAN) {
val = bswap128(val);
}
- memcpy(haddr, &val, 16);
- clear_helper_retaddr();
+ do_st16_he_mmu(env, addr, val, mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
Int128 val, MemOpIdx oi, uintptr_t ra)
{
- void *haddr;
+ MemOp mop = get_memop(oi);
- validate_memop(oi, MO_128 | MO_LE);
- haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+ tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
if (HOST_BIG_ENDIAN) {
val = bswap128(val);
}
- memcpy(haddr, &val, 16);
- clear_helper_retaddr();
+ do_st16_he_mmu(env, addr, val, mop, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
@@ -1267,7 +1462,6 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
void *haddr;
uint64_t ret;
- validate_memop(oi, MO_BEUQ);
haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
ret = ldq_p(haddr);
clear_helper_retaddr();