author     Richard Henderson <richard.henderson@linaro.org>   2023-03-27 19:56:31 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2023-05-16 16:30:29 -0700
commit     ddfdd4178beb56543ac98976efbc885d7e2b5150
tree       9e7047cec503d005d11ef4892c8af077a43ed0d4
parent     e570597a8a7762bc85196b699c0d733dc33929ec
tcg: Widen helper_atomic_* addresses to uint64_t
Always pass the target address as uint64_t.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
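
For context: the hunks in tcg/tcg-op-ldst.c below call maybe_extend_addr64() and maybe_free_addr64(), which are introduced elsewhere in this series. As a minimal sketch (assuming they are static helpers in tcg/tcg-op-ldst.c keyed on TARGET_LONG_BITS), their likely shape is:

static TCGv_i64 maybe_extend_addr64(TCGv addr)
{
#if TARGET_LONG_BITS == 32
    /* 32-bit guest: zero-extend the address into a fresh i64 temporary. */
    TCGv_i64 a64 = tcg_temp_ebb_new_i64();
    tcg_gen_extu_i32_i64(a64, addr);
    return a64;
#else
    /* 64-bit guest: TCGv is already TCGv_i64; pass it through unchanged. */
    return addr;
#endif
}

static void maybe_free_addr64(TCGv_i64 a64)
{
#if TARGET_LONG_BITS == 32
    /* Free only the temporary allocated in maybe_extend_addr64(). */
    tcg_temp_free_i64(a64);
#endif
}

This is why every converted call site pairs the extension with maybe_free_addr64(): on 32-bit guests a temporary is allocated per access, while 64-bit guests pass the address through at no cost.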
-rw-r--r--  accel/tcg/atomic_common.c.inc  | 14
-rw-r--r--  accel/tcg/tcg-runtime.h        | 46
-rw-r--r--  tcg/tcg-op-ldst.c              | 38
3 files changed, 57 insertions, 41 deletions
diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc
index 8f2ce43..fe0eea0 100644
--- a/accel/tcg/atomic_common.c.inc
+++ b/accel/tcg/atomic_common.c.inc
@@ -13,20 +13,20 @@
  * See the COPYING file in the top-level directory.
  */
 
-static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
+static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
                                   MemOpIdx oi)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
 }
 
 #if HAVE_ATOMIC128
-static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
+static void atomic_trace_ld_post(CPUArchState *env, uint64_t addr,
                                  MemOpIdx oi)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
 }
 
-static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
+static void atomic_trace_st_post(CPUArchState *env, uint64_t addr,
                                  MemOpIdx oi)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
@@ -40,7 +40,7 @@ static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
  */
 
 #define CMPXCHG_HELPER(OP, TYPE)                                         \
-    TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr,       \
+    TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr,           \
                              TYPE oldv, TYPE newv, uint32_t oi)          \
     { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
 
@@ -62,7 +62,7 @@ CMPXCHG_HELPER(cmpxchgo_le, Int128)
 
 #undef CMPXCHG_HELPER
 
-Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
+Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, uint64_t addr,
                                      Int128 cmpv, Int128 newv, uint32_t oi)
 {
 #if TCG_TARGET_REG_BITS == 32
@@ -82,7 +82,7 @@ Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
 #endif
 }
 
-Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
+Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, uint64_t addr,
                                      Int128 cmpv, Int128 newv, uint32_t oi)
 {
 #if TCG_TARGET_REG_BITS == 32
@@ -103,7 +103,7 @@ Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
 }
 
 #define ATOMIC_HELPER(OP, TYPE)                                               \
-    TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr,      \
+    TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr,          \
                                   TYPE val, uint32_t oi)                     \
     { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h
index 0e6c5f5..6f8c206 100644
--- a/accel/tcg/tcg-runtime.h
+++ b/accel/tcg/tcg-runtime.h
@@ -43,61 +43,61 @@ DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32)
 DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32)
 
 DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 #ifdef CONFIG_ATOMIC64
 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
-                   i64, env, tl, i64, i64, i32)
+                   i64, env, i64, i64, i64, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
-                   i64, env, tl, i64, i64, i32)
+                   i64, env, i64, i64, i64, i32)
 #endif
 #ifdef CONFIG_CMPXCHG128
 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
-                   i128, env, tl, i128, i128, i32)
+                   i128, env, i64, i128, i128, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
-                   i128, env, tl, i128, i128, i32)
+                   i128, env, i64, i128, i128, i32)
 #endif
 
 DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG,
-                   i128, env, tl, i128, i128, i32)
+                   i128, env, i64, i128, i128, i32)
 DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG,
-                   i128, env, tl, i128, i128, i32)
+                   i128, env, i64, i128, i128, i32)
 
 #ifdef CONFIG_ATOMIC64
 #define GEN_ATOMIC_HELPERS(NAME)                                  \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le),           \
-                       TCG_CALL_NO_WG, i64, env, tl, i64, i32)    \
+                       TCG_CALL_NO_WG, i64, env, i64, i64, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be),           \
-                       TCG_CALL_NO_WG, i64, env, tl, i64, i32)
+                       TCG_CALL_NO_WG, i64, env, i64, i64, i32)
 #else
 #define GEN_ATOMIC_HELPERS(NAME)                                  \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)
 #endif /* CONFIG_ATOMIC64 */
 
 GEN_ATOMIC_HELPERS(fetch_add)
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index aab6dda..ca57a27 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -623,15 +623,15 @@ static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc)
     }
 }
 
-typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv,
+typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                   TCGv_i32, TCGv_i32, TCGv_i32);
-typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv,
+typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                   TCGv_i64, TCGv_i64, TCGv_i32);
-typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv,
+typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv_i64,
                                    TCGv_i128, TCGv_i128, TCGv_i32);
-typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv,
+typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64,
                                   TCGv_i32, TCGv_i32);
-typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv,
+typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64,
                                   TCGv_i64, TCGv_i32);
 
 #ifdef CONFIG_ATOMIC64
@@ -682,6 +682,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
                                 TCGv_i32 newv, TCGArg idx, MemOp memop)
 {
     gen_atomic_cx_i32 gen;
+    TCGv_i64 a64;
     MemOpIdx oi;
 
     if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) {
@@ -694,7 +695,9 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv,
     tcg_debug_assert(gen != NULL);
 
     oi = make_memop_idx(memop & ~MO_SIGN, idx);
-    gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+    a64 = maybe_extend_addr64(addr);
+    gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+    maybe_free_addr64(a64);
 
     if (memop & MO_SIGN) {
         tcg_gen_ext_i32(retv, retv, memop);
@@ -750,7 +753,9 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv,
         gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)];
         if (gen) {
             MemOpIdx oi = make_memop_idx(memop, idx);
-            gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+            TCGv_i64 a64 = maybe_extend_addr64(addr);
+            gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+            maybe_free_addr64(a64);
             return;
         }
 
@@ -802,11 +807,14 @@ void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv,
                ? gen_helper_nonatomic_cmpxchgo_le
                : gen_helper_nonatomic_cmpxchgo_be);
         MemOpIdx oi = make_memop_idx(memop, idx);
+        TCGv_i64 a64;
 
         tcg_debug_assert((memop & MO_SIZE) == MO_128);
         tcg_debug_assert((memop & MO_SIGN) == 0);
 
-        gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+        a64 = maybe_extend_addr64(addr);
+        gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+        maybe_free_addr64(a64);
     } else {
         TCGv_i128 oldv = tcg_temp_ebb_new_i128();
         TCGv_i128 tmpv = tcg_temp_ebb_new_i128();
@@ -854,7 +862,9 @@ void tcg_gen_atomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv,
 
     if (gen) {
         MemOpIdx oi = make_memop_idx(memop, idx);
-        gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi));
+        TCGv_i64 a64 = maybe_extend_addr64(addr);
+        gen(retv, cpu_env, a64, cmpv, newv, tcg_constant_i32(oi));
+        maybe_free_addr64(a64);
         return;
     }
 
@@ -892,6 +902,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
                              TCGArg idx, MemOp memop, void * const table[])
 {
     gen_atomic_op_i32 gen;
+    TCGv_i64 a64;
     MemOpIdx oi;
 
     memop = tcg_canonicalize_memop(memop, 0, 0);
@@ -900,7 +911,9 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val,
     tcg_debug_assert(gen != NULL);
 
     oi = make_memop_idx(memop & ~MO_SIGN, idx);
-    gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
+    a64 = maybe_extend_addr64(addr);
+    gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
+    maybe_free_addr64(a64);
 
     if (memop & MO_SIGN) {
         tcg_gen_ext_i32(ret, ret, memop);
@@ -934,13 +947,16 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val,
     if ((memop & MO_SIZE) == MO_64) {
 #ifdef CONFIG_ATOMIC64
         gen_atomic_op_i64 gen;
+        TCGv_i64 a64;
         MemOpIdx oi;
 
         gen = table[memop & (MO_SIZE | MO_BSWAP)];
         tcg_debug_assert(gen != NULL);
 
         oi = make_memop_idx(memop & ~MO_SIGN, idx);
-        gen(ret, cpu_env, addr, val, tcg_constant_i32(oi));
+        a64 = maybe_extend_addr64(addr);
+        gen(ret, cpu_env, a64, val, tcg_constant_i32(oi));
+        maybe_free_addr64(a64);
 #else
         gen_helper_exit_atomic(cpu_env);
         /* Produce a result, so that we have a well-formed opcode stream
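
As a quick illustration of the effect on the helper signatures (a sketch, not part of the patch): with the usual DEF_HELPER type mapping (env -> CPUArchState *, i32 -> uint32_t, i64 -> uint64_t), the widened declaration of atomic_cmpxchgl_le above should expand to roughly the following prototype, with the guest address fixed at uint64_t regardless of TARGET_LONG_BITS:

/* Assumed expansion of DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, ...) after
 * this patch; the address parameter is now uint64_t on all targets. */
uint32_t helper_atomic_cmpxchgl_le(CPUArchState *env, uint64_t addr,
                                   uint32_t oldv, uint32_t newv, uint32_t oi);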