Diffstat (limited to 'accel/tcg')
-rw-r--r--   accel/tcg/atomic_common.c.inc   |  42
-rw-r--r--   accel/tcg/atomic_template.h     |  93
-rw-r--r--   accel/tcg/cpu-exec.c            |   2
-rw-r--r--   accel/tcg/cputlb.c              | 209
-rw-r--r--   accel/tcg/ldst_atomicity.c.inc  | 135
-rw-r--r--   accel/tcg/ldst_common.c.inc     |  24
-rw-r--r--   accel/tcg/tcg-runtime.h         |   4
-rw-r--r--   accel/tcg/translate-all.c       |   2
-rw-r--r--   accel/tcg/translator.c          |   2
-rw-r--r--   accel/tcg/user-exec.c           | 312
10 files changed, 189 insertions(+), 636 deletions(-)
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
index fe0eea0..ee222fd 100644
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -19,20 +19,6 @@ static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
}
-#if HAVE_ATOMIC128
-static void atomic_trace_ld_post(CPUArchState *env, uint64_t addr,
- MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-}
-
-static void atomic_trace_st_post(CPUArchState *env, uint64_t addr,
- MemOpIdx oi)
-{
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-#endif
-
/*
* Atomic helpers callable from TCG.
* These have a common interface and all defer to cpu_atomic_*
@@ -62,36 +48,16 @@ CMPXCHG_HELPER(cmpxchgo_le, Int128)
#undef CMPXCHG_HELPER
-Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, uint64_t addr,
- Int128 cmpv, Int128 newv, uint32_t oi)
-{
-#if TCG_TARGET_REG_BITS == 32
- uintptr_t ra = GETPC();
- Int128 oldv;
-
- oldv = cpu_ld16_be_mmu(env, addr, oi, ra);
- if (int128_eq(oldv, cmpv)) {
- cpu_st16_be_mmu(env, addr, newv, oi, ra);
- } else {
- /* Even with comparison failure, still need a write cycle. */
- probe_write(env, addr, 16, get_mmuidx(oi), ra);
- }
- return oldv;
-#else
- g_assert_not_reached();
-#endif
-}
-
-Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, uint64_t addr,
- Int128 cmpv, Int128 newv, uint32_t oi)
+Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
+ Int128 cmpv, Int128 newv, uint32_t oi)
{
#if TCG_TARGET_REG_BITS == 32
uintptr_t ra = GETPC();
Int128 oldv;
- oldv = cpu_ld16_le_mmu(env, addr, oi, ra);
+ oldv = cpu_ld16_mmu(env, addr, oi, ra);
if (int128_eq(oldv, cmpv)) {
- cpu_st16_le_mmu(env, addr, newv, oi, ra);
+ cpu_st16_mmu(env, addr, newv, oi, ra);
} else {
/* Even with comparison failure, still need a write cycle. */
probe_write(env, addr, 16, get_mmuidx(oi), ra);
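
Note: from this hunk on, byte order travels in the MemOpIdx rather than in
the helper name, so the single nonatomic_cmpxchgo covers both of the old
_be/_le entry points. A minimal caller sketch (mmu_idx, cmpv, newv assumed
in scope; illustrative only, not part of the patch):

    /* Big-endian, aligned 16-byte compare-and-swap via the unified helper. */
    MemOpIdx oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mmu_idx);
    Int128 oldv = helper_nonatomic_cmpxchgo(env, addr, cmpv, newv, oi);
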
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index 404a530..e312acd 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -73,8 +73,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_READ | PAGE_WRITE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
@@ -87,38 +86,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return ret;
}
-#if DATA_SIZE >= 16
-#if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_READ, retaddr);
- DATA_TYPE val;
-
- val = atomic16_read(haddr);
- ATOMIC_MMU_CLEANUP;
- atomic_trace_ld_post(env, addr, oi);
- return val;
-}
-
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_WRITE, retaddr);
-
- atomic16_set(haddr, val);
- ATOMIC_MMU_CLEANUP;
- atomic_trace_st_post(env, addr, oi);
-}
-#endif
-#else
+#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_READ | PAGE_WRITE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, val);
@@ -131,9 +103,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
- PAGE_READ | PAGE_WRITE, retaddr); \
- DATA_TYPE ret; \
+ DATA_TYPE *haddr, ret; \
+ haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -163,9 +134,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
- XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
- PAGE_READ | PAGE_WRITE, retaddr); \
- XDATA_TYPE cmp, old, new, val = xval; \
+ XDATA_TYPE *haddr, cmp, old, new, val = xval; \
+ haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@@ -188,7 +158,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN
-#endif /* DATA SIZE >= 16 */
+#endif /* DATA SIZE < 16 */
#undef END
@@ -206,8 +176,7 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_READ | PAGE_WRITE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
DATA_TYPE ret;
#if DATA_SIZE == 16
@@ -220,39 +189,11 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
return BSWAP(ret);
}
-#if DATA_SIZE >= 16
-#if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_READ, retaddr);
- DATA_TYPE val;
-
- val = atomic16_read(haddr);
- ATOMIC_MMU_CLEANUP;
- atomic_trace_ld_post(env, addr, oi);
- return BSWAP(val);
-}
-
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_WRITE, retaddr);
-
- val = BSWAP(val);
- atomic16_set(haddr, val);
- ATOMIC_MMU_CLEANUP;
- atomic_trace_st_post(env, addr, oi);
-}
-#endif
-#else
+#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
- PAGE_READ | PAGE_WRITE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
ABI_TYPE ret;
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -265,9 +206,8 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
- PAGE_READ | PAGE_WRITE, retaddr); \
- DATA_TYPE ret; \
+ DATA_TYPE *haddr, ret; \
+ haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -294,9 +234,8 @@ GEN_ATOMIC_HELPER(xor_fetch)
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
- XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
- PAGE_READ | PAGE_WRITE, retaddr); \
- XDATA_TYPE ldo, ldn, old, new, val = xval; \
+ XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
+ haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
@@ -326,7 +265,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
#undef GEN_ATOMIC_HELPER_FN
-#endif /* DATA_SIZE >= 16 */
+#endif /* DATA_SIZE < 16 */
#undef END
#endif /* DATA_SIZE > 1 */
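
The GEN_ATOMIC_HELPER_FN bodies elided at the "do { \" lines above follow
the usual compare-and-swap retry loop. Hand-expanded for a 32-bit fetch-umin
as a sketch of the shape (not the literal macro output):

    uint32_t cmp, old, new;
    smp_mb();
    cmp = qatomic_read__nocheck(haddr);
    do {
        old = cmp;
        new = MIN(old, val);
        cmp = qatomic_cmpxchg__nocheck(haddr, old, new);
    } while (cmp != old);
    smp_mb();
    /* fetch_umin returns old; umin_fetch would return new. */
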
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index bc0e1c3..0e74196 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -307,7 +307,6 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
tb->flags, tb->cflags, lookup_symbol(pc));
-#if defined(DEBUG_DISAS)
if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
FILE *logfile = qemu_log_trylock();
if (logfile) {
@@ -323,7 +322,6 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
qemu_log_unlock(logfile);
}
}
-#endif /* DEBUG_DISAS */
}
}
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index ae0fbcd..90c72c9 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1896,12 +1896,9 @@ static bool mmu_lookup(CPUArchState *env, target_ulong addr, MemOpIdx oi,
/*
* Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address.
- *
- * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, int size, int prot,
- uintptr_t retaddr)
+ MemOpIdx oi, int size, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
MemOp mop = get_memop(oi);
@@ -1937,53 +1934,36 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
tlbe = tlb_entry(env, mmu_idx, addr);
/* Check TLB entry and enforce page permissions. */
- if (prot & PAGE_WRITE) {
- tlb_addr = tlb_addr_write(tlbe);
- if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
- addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, size,
- MMU_DATA_STORE, mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
- }
- tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+ tlb_addr = tlb_addr_write(tlbe);
+ if (!tlb_hit(tlb_addr, addr)) {
+ if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_STORE,
+ addr & TARGET_PAGE_MASK)) {
+ tlb_fill(env_cpu(env), addr, size,
+ MMU_DATA_STORE, mmu_idx, retaddr);
+ index = tlb_index(env, mmu_idx, addr);
+ tlbe = tlb_entry(env, mmu_idx, addr);
}
+ tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
+ }
- if (prot & PAGE_READ) {
- /*
- * Let the guest notice RMW on a write-only page.
- * We have just verified that the page is writable.
- * Subpage lookups may have left TLB_INVALID_MASK set,
- * but addr_read will only be -1 if PAGE_READ was unset.
- */
- if (unlikely(tlbe->addr_read == -1)) {
- tlb_fill(env_cpu(env), addr, size,
- MMU_DATA_LOAD, mmu_idx, retaddr);
- /*
- * Since we don't support reads and writes to different
- * addresses, and we do have the proper page loaded for
- * write, this shouldn't ever return. But just in case,
- * handle via stop-the-world.
- */
- goto stop_the_world;
- }
- /* Collect TLB_WATCHPOINT for read. */
- tlb_addr |= tlbe->addr_read;
- }
- } else /* if (prot & PAGE_READ) */ {
- tlb_addr = tlbe->addr_read;
- if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env, mmu_idx, index, MMU_DATA_LOAD,
- addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, size,
- MMU_DATA_LOAD, mmu_idx, retaddr);
- index = tlb_index(env, mmu_idx, addr);
- tlbe = tlb_entry(env, mmu_idx, addr);
- }
- tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
- }
+ /*
+ * Let the guest notice RMW on a write-only page.
+ * We have just verified that the page is writable.
+ * Subpage lookups may have left TLB_INVALID_MASK set,
+ * but addr_read will only be -1 if PAGE_READ was unset.
+ */
+ if (unlikely(tlbe->addr_read == -1)) {
+ tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+ /*
+ * Since we don't support reads and writes to different
+ * addresses, and we do have the proper page loaded for
+ * write, this shouldn't ever return. But just in case,
+ * handle via stop-the-world.
+ */
+ goto stop_the_world;
}
+ /* Collect TLB_WATCHPOINT for read. */
+ tlb_addr |= tlbe->addr_read;
/* Notice an IO access or a needs-MMU-lookup access */
if (unlikely(tlb_addr & (TLB_MMIO | TLB_DISCARD_WRITE))) {
@@ -2000,11 +1980,8 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
- QEMU_BUILD_BUG_ON(PAGE_READ != BP_MEM_READ);
- QEMU_BUILD_BUG_ON(PAGE_WRITE != BP_MEM_WRITE);
- /* therefore prot == watchpoint bits */
- cpu_check_watchpoint(env_cpu(env), addr, size,
- full->attrs, prot, retaddr);
+ cpu_check_watchpoint(env_cpu(env), addr, size, full->attrs,
+ BP_MEM_READ | BP_MEM_WRITE, retaddr);
}
return hostaddr;
@@ -2575,89 +2552,45 @@ uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
return ret;
}
-uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint16_t ret;
-
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW);
- ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint32_t ret;
-
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL);
- ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- uint64_t ret;
-
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ);
- ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
uint16_t ret;
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
}
-uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
uint32_t ret;
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
}
-uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
uint64_t ret;
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
}
-Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
-{
- Int128 ret;
-
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_BE|MO_128));
- ret = do_ld16_mmu(env, addr, oi, ra);
- plugin_load_cb(env, addr, oi);
- return ret;
-}
-
-Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
Int128 ret;
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_LE|MO_128));
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
ret = do_ld16_mmu(env, addr, oi, ra);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2779,7 +2712,7 @@ static uint64_t do_st16_leN(CPUArchState *env, MMULookupPageData *p,
case MO_ATOM_WITHIN16_PAIR:
/* Since size > 8, this is the half that must be atomic. */
- if (!HAVE_al16) {
+ if (!HAVE_ATOMIC128_RW) {
cpu_loop_exit_atomic(env_cpu(env), ra);
}
return store_whole_le16(p->haddr, p->size, val_le);
@@ -3045,66 +2978,34 @@ void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
plugin_store_cb(env, addr, oi);
}
-void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW);
- do_st2_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL);
- do_st4_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ);
- do_st8_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
- MemOpIdx oi, uintptr_t retaddr)
+void cpu_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
do_st2_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
-void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+void cpu_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
do_st4_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
-void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
+void cpu_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
do_st8_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
-void cpu_st16_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_BE|MO_128));
- do_st16_mmu(env, addr, val, oi, retaddr);
- plugin_store_cb(env, addr, oi);
-}
-
-void cpu_st16_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
- MemOpIdx oi, uintptr_t retaddr)
+void cpu_st16_mmu(CPUArchState *env, target_ulong addr, Int128 val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- tcg_debug_assert((get_memop(oi) & (MO_BSWAP|MO_SIZE)) == (MO_LE|MO_128));
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
do_st16_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
@@ -3137,7 +3038,7 @@ void cpu_st16_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
#include "atomic_template.h"
#endif
-#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
+#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
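
For reference, atomic_template.h is designed to be included once per operand
size, with DATA_SIZE selecting the generated types and names; the 16-byte
instantiation is now gated directly on the host capability macros. A sketch
of the include pattern:

    #define DATA_SIZE 4
    #include "atomic_template.h"   /* emits the 4-byte helpers */

    #if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
    #define DATA_SIZE 16
    #include "atomic_template.h"   /* 16-byte helpers, host permitting */
    #endif
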
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index ba5db7c..0f6b3f8 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -16,35 +16,6 @@
#endif
#define HAVE_al8_fast (ATOMIC_REG_SIZE >= 8)
-/*
- * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
- * that are supported by the host, e.g. s390x. We can force the pointer to
- * have our known alignment with __builtin_assume_aligned, however prior to
- * GCC 13 that was only reliable with optimization enabled. See
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
- */
-#if defined(CONFIG_ATOMIC128_OPT)
-# if !defined(__OPTIMIZE__)
-# define ATTRIBUTE_ATOMIC128_OPT __attribute__((optimize("O1")))
-# endif
-# define CONFIG_ATOMIC128
-#endif
-#ifndef ATTRIBUTE_ATOMIC128_OPT
-# define ATTRIBUTE_ATOMIC128_OPT
-#endif
-
-#if defined(CONFIG_ATOMIC128)
-# define HAVE_al16_fast true
-#else
-# define HAVE_al16_fast false
-#endif
-#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
-# define HAVE_al16 true
-#else
-# define HAVE_al16 false
-#endif
-
-
/**
* required_atomicity:
*
@@ -164,26 +135,6 @@ static inline uint64_t load_atomic8(void *pv)
}
/**
- * load_atomic16:
- * @pv: host address
- *
- * Atomically load 16 aligned bytes from @pv.
- */
-static inline Int128 ATTRIBUTE_ATOMIC128_OPT
-load_atomic16(void *pv)
-{
-#ifdef CONFIG_ATOMIC128
- __uint128_t *p = __builtin_assume_aligned(pv, 16);
- Int128Alias r;
-
- r.u = qatomic_read__nocheck(p);
- return r.s;
-#else
- qemu_build_not_reached();
-#endif
-}
-
-/**
* load_atomic8_or_exit:
* @env: cpu context
* @ra: host unwind address
@@ -228,8 +179,8 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
{
Int128 *p = __builtin_assume_aligned(pv, 16);
- if (HAVE_al16_fast) {
- return load_atomic16(p);
+ if (HAVE_ATOMIC128_RO) {
+ return atomic16_read_ro(p);
}
#ifdef CONFIG_USER_ONLY
@@ -249,14 +200,9 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
* In system mode all guest pages are writable, and for user-only
* we have just checked writability. Try cmpxchg.
*/
-#if defined(CONFIG_CMPXCHG128)
- /* Swap 0 with 0, with the side-effect of returning the old value. */
- {
- Int128Alias r;
- r.u = __sync_val_compare_and_swap_16((__uint128_t *)p, 0, 0);
- return r.s;
+ if (HAVE_ATOMIC128_RW) {
+ return atomic16_read_rw(p);
}
-#endif
/* Ultimate fallback: re-execute in serial context. */
cpu_loop_exit_atomic(env_cpu(env), ra);
@@ -377,11 +323,10 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
load_atom_extract_al16_or_al8(void *pv, int s)
{
-#if defined(CONFIG_ATOMIC128)
uintptr_t pi = (uintptr_t)pv;
int o = pi & 7;
int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;
- __uint128_t r;
+ Int128 r;
pv = (void *)(pi & ~7);
if (pi & 8) {
@@ -390,18 +335,14 @@ load_atom_extract_al16_or_al8(void *pv, int s)
uint64_t b = qatomic_read__nocheck(p8 + 1);
if (HOST_BIG_ENDIAN) {
- r = ((__uint128_t)a << 64) | b;
+ r = int128_make128(b, a);
} else {
- r = ((__uint128_t)b << 64) | a;
+ r = int128_make128(a, b);
}
} else {
- __uint128_t *p16 = __builtin_assume_aligned(pv, 16, 0);
- r = qatomic_read__nocheck(p16);
+ r = atomic16_read_ro(pv);
}
- return r >> shr;
-#else
- qemu_build_not_reached();
-#endif
+ return int128_getlo(int128_urshift(r, shr));
}
/**
@@ -489,7 +430,7 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
if (likely((pi & 1) == 0)) {
return load_atomic2(pv);
}
- if (HAVE_al16_fast) {
+ if (HAVE_ATOMIC128_RO) {
return load_atom_extract_al16_or_al8(pv, 2);
}
@@ -528,7 +469,7 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
if (likely((pi & 3) == 0)) {
return load_atomic4(pv);
}
- if (HAVE_al16_fast) {
+ if (HAVE_ATOMIC128_RO) {
return load_atom_extract_al16_or_al8(pv, 4);
}
@@ -574,7 +515,7 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
if (HAVE_al8 && likely((pi & 7) == 0)) {
return load_atomic8(pv);
}
- if (HAVE_al16_fast) {
+ if (HAVE_ATOMIC128_RO) {
return load_atom_extract_al16_or_al8(pv, 8);
}
@@ -624,8 +565,8 @@ static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
* If the host does not support 16-byte atomics, wait until we have
* examined the atomicity parameters below.
*/
- if (HAVE_al16_fast && likely((pi & 15) == 0)) {
- return load_atomic16(pv);
+ if (HAVE_ATOMIC128_RO && likely((pi & 15) == 0)) {
+ return atomic16_read_ro(pv);
}
atmax = required_atomicity(env, pi, memop);
@@ -705,36 +646,6 @@ static inline void store_atomic8(void *pv, uint64_t val)
}
/**
- * store_atomic16:
- * @pv: host address
- * @val: value to store
- *
- * Atomically store 16 aligned bytes to @pv.
- */
-static inline void ATTRIBUTE_ATOMIC128_OPT
-store_atomic16(void *pv, Int128Alias val)
-{
-#if defined(CONFIG_ATOMIC128)
- __uint128_t *pu = __builtin_assume_aligned(pv, 16);
- qatomic_set__nocheck(pu, val.u);
-#elif defined(CONFIG_CMPXCHG128)
- __uint128_t *pu = __builtin_assume_aligned(pv, 16);
- __uint128_t o;
-
- /*
- * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
- * defer to libatomic, so we must use __sync_*_compare_and_swap_16
- * and accept the sequential consistency that comes with it.
- */
- do {
- o = *pu;
- } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
-#else
- qemu_build_not_reached();
-#endif
-}
-
-/**
* store_atom_4x2
*/
static inline void store_atom_4_by_2(void *pv, uint32_t val)
@@ -974,7 +885,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
int sh = o * 8;
Int128 m, v;
- qemu_build_assert(HAVE_al16);
+ qemu_build_assert(HAVE_ATOMIC128_RW);
/* Like MAKE_64BIT_MASK(0, sz), but larger. */
if (sz <= 64) {
@@ -1034,7 +945,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
return;
}
} else if ((pi & 15) == 7) {
- if (HAVE_al16) {
+ if (HAVE_ATOMIC128_RW) {
Int128 v = int128_lshift(int128_make64(val), 56);
Int128 m = int128_lshift(int128_make64(0xffff), 56);
store_atom_insert_al16(pv - 7, v, m);
@@ -1103,7 +1014,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
return;
}
} else {
- if (HAVE_al16) {
+ if (HAVE_ATOMIC128_RW) {
store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val)));
return;
}
@@ -1168,7 +1079,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
}
break;
case MO_64:
- if (HAVE_al16) {
+ if (HAVE_ATOMIC128_RW) {
store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val)));
return;
}
@@ -1194,8 +1105,8 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
uint64_t a, b;
int atmax;
- if (HAVE_al16_fast && likely((pi & 15) == 0)) {
- store_atomic16(pv, val);
+ if (HAVE_ATOMIC128_RW && likely((pi & 15) == 0)) {
+ atomic16_set(pv, val);
return;
}
@@ -1223,7 +1134,7 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
}
break;
case -MO_64:
- if (HAVE_al16) {
+ if (HAVE_ATOMIC128_RW) {
uint64_t val_le;
int s2 = pi & 15;
int s1 = 16 - s2;
@@ -1250,8 +1161,8 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
}
break;
case MO_128:
- if (HAVE_al16) {
- store_atomic16(pv, val);
+ if (HAVE_ATOMIC128_RW) {
+ atomic16_set(pv, val);
return;
}
break;
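
The rewritten extraction path stays in Int128 throughout, so it compiles
even when the host lacks native 128-bit atomics. A condensed sketch of the
aligned case (s, o, pv as in load_atom_extract_al16_or_al8 above):

    Int128 r = atomic16_read_ro(pv);                  /* 16 aligned bytes */
    int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;
    uint64_t v = int128_getlo(int128_urshift(r, shr));
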
diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc
index 6ac8d87..5f8144b 100644
--- a/accel/tcg/ldst_common.c.inc
+++ b/accel/tcg/ldst_common.c.inc
@@ -26,7 +26,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
- return cpu_ldw_be_mmu(env, addr, oi, ra);
+ return cpu_ldw_mmu(env, addr, oi, ra);
}
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -39,21 +39,21 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
- return cpu_ldl_be_mmu(env, addr, oi, ra);
+ return cpu_ldl_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
- return cpu_ldq_be_mmu(env, addr, oi, ra);
+ return cpu_ldq_mmu(env, addr, oi, ra);
}
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
- return cpu_ldw_le_mmu(env, addr, oi, ra);
+ return cpu_ldw_mmu(env, addr, oi, ra);
}
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -66,14 +66,14 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
- return cpu_ldl_le_mmu(env, addr, oi, ra);
+ return cpu_ldl_mmu(env, addr, oi, ra);
}
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
- return cpu_ldq_le_mmu(env, addr, oi, ra);
+ return cpu_ldq_mmu(env, addr, oi, ra);
}
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
@@ -87,42 +87,42 @@ void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
- cpu_stw_be_mmu(env, addr, val, oi, ra);
+ cpu_stw_mmu(env, addr, val, oi, ra);
}
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
- cpu_stl_be_mmu(env, addr, val, oi, ra);
+ cpu_stl_mmu(env, addr, val, oi, ra);
}
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
- cpu_stq_be_mmu(env, addr, val, oi, ra);
+ cpu_stq_mmu(env, addr, val, oi, ra);
}
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
- cpu_stw_le_mmu(env, addr, val, oi, ra);
+ cpu_stw_mmu(env, addr, val, oi, ra);
}
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
- cpu_stl_le_mmu(env, addr, val, oi, ra);
+ cpu_stl_mmu(env, addr, val, oi, ra);
}
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
int mmu_idx, uintptr_t ra)
{
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
- cpu_stq_le_mmu(env, addr, val, oi, ra);
+ cpu_stq_mmu(env, addr, val, oi, ra);
}
/*--------------------------*/
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index 6f8c206..39e6800 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -65,9 +65,7 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32)
#endif
-DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG,
- i128, env, i64, i128, i128, i32)
-DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG,
+DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG,
i128, env, i64, i128, i128, i32)
#ifdef CONFIG_ATOMIC64
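
The DEF_HELPER_FLAGS_5 line above corresponds to the following C prototype
(i128 maps to Int128, i64 to uint64_t, i32 to uint32_t; TCG_CALL_NO_WG marks
a helper that does not write TCG globals):

    Int128 helper_nonatomic_cmpxchgo(CPUArchState *env, uint64_t addr,
                                     Int128 cmpv, Int128 newv, uint32_t oi);
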
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 353849c..c87648b 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -432,7 +432,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
#endif
-#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
qemu_log_in_addr_range(pc)) {
FILE *logfile = qemu_log_trylock();
@@ -505,7 +504,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
qemu_log_unlock(logfile);
}
}
-#endif
qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 7bda43f..6120ef2 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -122,7 +122,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
tb->size = db->pc_next - db->pc_first;
tb->icount = db->num_insns;
-#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(db->pc_first)) {
FILE *logfile = qemu_log_trylock();
@@ -133,7 +132,6 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
qemu_log_unlock(logfile);
}
}
-#endif
}
static void *translator_access(CPUArchState *env, DisasContextBase *db,
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 36ad828..dc8d6b5 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -940,8 +940,8 @@ uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
return ret;
}
-static uint16_t do_ld2_he_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
uint16_t ret;
@@ -950,14 +950,6 @@ static uint16_t do_ld2_he_mmu(CPUArchState *env, abi_ptr addr,
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_2(env, ra, haddr, mop);
clear_helper_retaddr();
- return ret;
-}
-
-tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- MemOp mop = get_memop(oi);
- uint16_t ret = do_ld2_he_mmu(env, addr, mop, ra);
if (mop & MO_BSWAP) {
ret = bswap16(ret);
@@ -965,44 +957,28 @@ tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
return ret;
}
-tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- int16_t ret = do_ld2_he_mmu(env, addr, mop, ra);
-
- if (mop & MO_BSWAP) {
- ret = bswap16(ret);
- }
- return ret;
+ return do_ld2_mmu(env, addr, get_memop(oi), ra);
}
-uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- uint16_t ret;
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
- ret = do_ld2_he_mmu(env, addr, mop, ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return cpu_to_be16(ret);
+ return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
}
-uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- uint16_t ret;
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
- ret = do_ld2_he_mmu(env, addr, mop, ra);
+ uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return cpu_to_le16(ret);
+ return ret;
}
-static uint32_t do_ld4_he_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
uint32_t ret;
@@ -1011,14 +987,6 @@ static uint32_t do_ld4_he_mmu(CPUArchState *env, abi_ptr addr,
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_4(env, ra, haddr, mop);
clear_helper_retaddr();
- return ret;
-}
-
-tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- MemOp mop = get_memop(oi);
- uint32_t ret = do_ld4_he_mmu(env, addr, mop, ra);
if (mop & MO_BSWAP) {
ret = bswap32(ret);
@@ -1026,44 +994,28 @@ tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
return ret;
}
-tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- int32_t ret = do_ld4_he_mmu(env, addr, mop, ra);
-
- if (mop & MO_BSWAP) {
- ret = bswap32(ret);
- }
- return ret;
+ return do_ld4_mmu(env, addr, get_memop(oi), ra);
}
-uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- uint32_t ret;
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
- ret = do_ld4_he_mmu(env, addr, mop, ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return cpu_to_be32(ret);
+ return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
}
-uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- uint32_t ret;
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
- ret = do_ld4_he_mmu(env, addr, mop, ra);
+ uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return cpu_to_le32(ret);
+ return ret;
}
-static uint64_t do_ld8_he_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
uint64_t ret;
@@ -1072,14 +1024,6 @@ static uint64_t do_ld8_he_mmu(CPUArchState *env, abi_ptr addr,
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_8(env, ra, haddr, mop);
clear_helper_retaddr();
- return ret;
-}
-
-uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- MemOp mop = get_memop(oi);
- uint64_t ret = do_ld8_he_mmu(env, addr, mop, ra);
if (mop & MO_BSWAP) {
ret = bswap64(ret);
@@ -1087,32 +1031,22 @@ uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
return ret;
}
-uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- uint64_t ret;
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
- ret = do_ld8_he_mmu(env, addr, mop, ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return cpu_to_be64(ret);
+ return do_ld8_mmu(env, addr, get_memop(oi), ra);
}
-uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- uint64_t ret;
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
- ret = do_ld8_he_mmu(env, addr, mop, ra);
+ uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- return cpu_to_le64(ret);
+ return ret;
}
-static Int128 do_ld16_he_mmu(CPUArchState *env, abi_ptr addr,
- MemOp mop, uintptr_t ra)
+static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
Int128 ret;
@@ -1121,14 +1055,6 @@ static Int128 do_ld16_he_mmu(CPUArchState *env, abi_ptr addr,
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
ret = load_atom_16(env, ra, haddr, mop);
clear_helper_retaddr();
- return ret;
-}
-
-Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
- MemOpIdx oi, uintptr_t ra)
-{
- MemOp mop = get_memop(oi);
- Int128 ret = do_ld16_he_mmu(env, addr, mop, ra);
if (mop & MO_BSWAP) {
ret = bswap128(ret);
@@ -1136,38 +1062,22 @@ Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
return ret;
}
-Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+ MemOpIdx oi, uintptr_t ra)
{
- return helper_ld16_mmu(env, addr, oi, GETPC());
+ return do_ld16_mmu(env, addr, get_memop(oi), ra);
}
-Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
- MemOp mop = get_memop(oi);
- Int128 ret;
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
- ret = do_ld16_he_mmu(env, addr, mop, ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- if (!HOST_BIG_ENDIAN) {
- ret = bswap128(ret);
- }
- return ret;
+ return helper_ld16_mmu(env, addr, oi, GETPC());
}
-Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
- MemOpIdx oi, uintptr_t ra)
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+ MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
- Int128 ret;
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
- ret = do_ld16_he_mmu(env, addr, mop, ra);
+ Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
- if (HOST_BIG_ENDIAN) {
- ret = bswap128(ret);
- }
return ret;
}
@@ -1195,139 +1105,101 @@ void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
-static void do_st2_he_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
- MemOp mop, uintptr_t ra)
+static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_16);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
- store_atom_2(env, ra, haddr, mop, val);
- clear_helper_retaddr();
-}
-
-void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- MemOp mop = get_memop(oi);
if (mop & MO_BSWAP) {
val = bswap16(val);
}
- do_st2_he_mmu(env, addr, val, mop, ra);
+ store_atom_2(env, ra, haddr, mop, val);
+ clear_helper_retaddr();
}
-void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
- do_st2_he_mmu(env, addr, be16_to_cpu(val), mop, ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+ do_st2_mmu(env, addr, val, get_memop(oi), ra);
}
-void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
- do_st2_he_mmu(env, addr, le16_to_cpu(val), mop, ra);
+ do_st2_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
-static void do_st4_he_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
- MemOp mop, uintptr_t ra)
+static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_32);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
- store_atom_4(env, ra, haddr, mop, val);
- clear_helper_retaddr();
-}
-
-void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- MemOp mop = get_memop(oi);
if (mop & MO_BSWAP) {
val = bswap32(val);
}
- do_st4_he_mmu(env, addr, val, mop, ra);
+ store_atom_4(env, ra, haddr, mop, val);
+ clear_helper_retaddr();
}
-void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
- do_st4_he_mmu(env, addr, be32_to_cpu(val), mop, ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+ do_st4_mmu(env, addr, val, get_memop(oi), ra);
}
-void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
+void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
- do_st4_he_mmu(env, addr, le32_to_cpu(val), mop, ra);
+ do_st4_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
-static void do_st8_he_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
- MemOp mop, uintptr_t ra)
+static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_64);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
- store_atom_8(env, ra, haddr, mop, val);
- clear_helper_retaddr();
-}
-
-void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
- MemOpIdx oi, uintptr_t ra)
-{
- MemOp mop = get_memop(oi);
if (mop & MO_BSWAP) {
val = bswap64(val);
}
- do_st8_he_mmu(env, addr, val, mop, ra);
+ store_atom_8(env, ra, haddr, mop, val);
+ clear_helper_retaddr();
}
-void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
- do_st8_he_mmu(env, addr, cpu_to_be64(val), mop, ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+ do_st8_mmu(env, addr, val, get_memop(oi), ra);
}
-void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
- do_st8_he_mmu(env, addr, cpu_to_le64(val), mop, ra);
+ do_st8_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
-static void do_st16_he_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
- MemOp mop, uintptr_t ra)
+static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+ MemOp mop, uintptr_t ra)
{
void *haddr;
tcg_debug_assert((mop & MO_SIZE) == MO_128);
haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
+
+ if (mop & MO_BSWAP) {
+ val = bswap128(val);
+ }
store_atom_16(env, ra, haddr, mop, val);
clear_helper_retaddr();
}
@@ -1335,12 +1207,7 @@ static void do_st16_he_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
-
- if (mop & MO_BSWAP) {
- val = bswap128(val);
- }
- do_st16_he_mmu(env, addr, val, mop, ra);
+ do_st16_mmu(env, addr, val, get_memop(oi), ra);
}
void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
@@ -1348,29 +1215,10 @@ void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
helper_st16_mmu(env, addr, val, oi, GETPC());
}
-void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
- Int128 val, MemOpIdx oi, uintptr_t ra)
+void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
+ Int128 val, MemOpIdx oi, uintptr_t ra)
{
- MemOp mop = get_memop(oi);
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_BE);
- if (!HOST_BIG_ENDIAN) {
- val = bswap128(val);
- }
- do_st16_he_mmu(env, addr, val, mop, ra);
- qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
- Int128 val, MemOpIdx oi, uintptr_t ra)
-{
- MemOp mop = get_memop(oi);
-
- tcg_debug_assert((mop & MO_BSWAP) == MO_LE);
- if (HOST_BIG_ENDIAN) {
- val = bswap128(val);
- }
- do_st16_he_mmu(env, addr, val, mop, ra);
+ do_st16_mmu(env, addr, val, get_memop(oi), ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
@@ -1475,12 +1323,9 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
/*
* Do not allow unaligned operations to proceed. Return the host address.
- *
- * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
*/
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, int size, int prot,
- uintptr_t retaddr)
+ MemOpIdx oi, int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
int a_bits = get_alignment_bits(mop);
@@ -1488,8 +1333,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
/* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) {
- MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
- cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
+ cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
}
/* Enforce qemu required alignment. */
@@ -1527,7 +1371,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#include "atomic_template.h"
#endif
-#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
+#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
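
With the swap folded into do_st16_mmu, callers select byte order purely
through the MemOpIdx. A hedged usage sketch, as it might appear inside a
target helper (mmu_idx, lo, hi assumed in scope; illustrative only):

    MemOpIdx oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mmu_idx);
    cpu_st16_mmu(env, addr, int128_make128(lo, hi), oi, GETPC());
    /* A matching load round-trips through the same oi: */
    Int128 back = cpu_ld16_mmu(env, addr, oi, GETPC());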