author     Richard Henderson <richard.henderson@linaro.org>  2023-05-16 20:07:20 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-16 20:07:20 -0700
commit     fecccfcc542370caedbfc09fe9963d128f5e73a0
tree       78d60fe5833e95d65d2344981ea4ea0ddeaa1386  /tcg/tcg-op-ldst.c
parent     0700ceb3939f08619d7f323209597ef62b489514
tcg: Split INDEX_op_qemu_{ld,st}* for guest address size
For 32-bit hosts, we cannot simply rely on TCGContext.addr_bits,
as we need one or two host registers to represent the guest address.
Create the new opcodes and update all users. Since we have not
yet eliminated TARGET_LONG_BITS, only one of the two opcodes will
ever be used, so we can get away with treating them the same in
the backends.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
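
Every emission site in the diff below repeats the same dispatch: consult
tcg_ctx->addr_type once at translate time and pick the _a32_ or _a64_
variant of the opcode. The following is a minimal standalone C sketch of
that dispatch, not QEMU's code: the two-member enums and the helper name
pick_ld_i32_opc are illustrative stand-ins for the real TCGType and
TCGOpcode definitions.

    #include <stdio.h>

    /* Toy stand-ins for TCG's real types; illustrative only. */
    typedef enum { TCG_TYPE_I32, TCG_TYPE_I64 } TCGType;

    typedef enum {
        INDEX_op_qemu_ld_a32_i32,   /* 32-bit guest address, 32-bit data */
        INDEX_op_qemu_ld_a64_i32,   /* 64-bit guest address, 32-bit data */
    } TCGOpcode;

    /*
     * Pick the opcode variant from the guest address width, mirroring
     * the tcg_ctx->addr_type test the patch adds before each
     * gen_ldst() call.
     */
    static TCGOpcode pick_ld_i32_opc(TCGType addr_type)
    {
        return addr_type == TCG_TYPE_I32 ? INDEX_op_qemu_ld_a32_i32
                                         : INDEX_op_qemu_ld_a64_i32;
    }

    int main(void)
    {
        /* A 64-bit guest address selects the _a64_ form. */
        printf("opc = %d\n", (int)pick_ld_i32_opc(TCG_TYPE_I64));
        return 0;
    }

Encoding the address width in the opcode itself is what lets a 32-bit
backend see, from the opcode alone, whether the guest address arrives in
one host register or a register pair.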
Diffstat (limited to 'tcg/tcg-op-ldst.c')
-rw-r--r--  tcg/tcg-op-ldst.c  |  85
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c
index 679be51..f4e508c 100644
--- a/tcg/tcg-op-ldst.c
+++ b/tcg/tcg-op-ldst.c
@@ -164,6 +164,7 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
     MemOp orig_memop;
     MemOpIdx orig_oi, oi;
     TCGv_i64 copy_addr;
+    TCGOpcode opc;
 
     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     orig_memop = memop = tcg_canonicalize_memop(memop, 0, 0);
@@ -179,7 +180,12 @@ static void tcg_gen_qemu_ld_i32_int(TCGv_i32 val, TCGTemp *addr,
     }
 
     copy_addr = plugin_maybe_preserve_addr(addr);
-    gen_ldst(INDEX_op_qemu_ld_i32, tcgv_i32_temp(val), NULL, addr, oi);
+    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+        opc = INDEX_op_qemu_ld_a32_i32;
+    } else {
+        opc = INDEX_op_qemu_ld_a64_i32;
+    }
+    gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
     plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
 
     if ((orig_memop ^ memop) & MO_BSWAP) {
@@ -235,9 +241,17 @@ static void tcg_gen_qemu_st_i32_int(TCGv_i32 val, TCGTemp *addr,
     }
 
     if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) {
-        opc = INDEX_op_qemu_st8_i32;
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            opc = INDEX_op_qemu_st8_a32_i32;
+        } else {
+            opc = INDEX_op_qemu_st8_a64_i32;
+        }
     } else {
-        opc = INDEX_op_qemu_st_i32;
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            opc = INDEX_op_qemu_st_a32_i32;
+        } else {
+            opc = INDEX_op_qemu_st_a64_i32;
+        }
     }
     gen_ldst(opc, tcgv_i32_temp(val), NULL, addr, oi);
     plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
@@ -261,6 +275,7 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
     MemOp orig_memop;
     MemOpIdx orig_oi, oi;
     TCGv_i64 copy_addr;
+    TCGOpcode opc;
 
     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_ld_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -286,7 +301,12 @@ static void tcg_gen_qemu_ld_i64_int(TCGv_i64 val, TCGTemp *addr,
     }
 
     copy_addr = plugin_maybe_preserve_addr(addr);
-    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, oi);
+    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+        opc = INDEX_op_qemu_ld_a32_i64;
+    } else {
+        opc = INDEX_op_qemu_ld_a64_i64;
+    }
+    gen_ldst_i64(opc, val, addr, oi);
     plugin_gen_mem_callbacks(copy_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
 
     if ((orig_memop ^ memop) & MO_BSWAP) {
@@ -322,6 +342,7 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
 {
     TCGv_i64 swap = NULL;
     MemOpIdx orig_oi, oi;
+    TCGOpcode opc;
 
     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_st_i32_int(TCGV_LOW(val), addr, idx, memop);
@@ -352,7 +373,12 @@ static void tcg_gen_qemu_st_i64_int(TCGv_i64 val, TCGTemp *addr,
         oi = make_memop_idx(memop, idx);
     }
 
-    gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, oi);
+    if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+        opc = INDEX_op_qemu_st_a32_i64;
+    } else {
+        opc = INDEX_op_qemu_st_a64_i64;
+    }
+    gen_ldst_i64(opc, val, addr, oi);
     plugin_gen_mem_callbacks(NULL, addr, orig_oi, QEMU_PLUGIN_MEM_W);
 
     if (swap) {
@@ -465,6 +491,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
 {
     const MemOpIdx orig_oi = make_memop_idx(memop, idx);
     TCGv_i64 ext_addr = NULL;
+    TCGOpcode opc;
 
     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
 
@@ -484,8 +511,12 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
             hi = TCGV128_HIGH(val);
         }
 
-        gen_ldst(INDEX_op_qemu_ld_i128, tcgv_i64_temp(lo),
-                 tcgv_i64_temp(hi), addr, oi);
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            opc = INDEX_op_qemu_ld_a32_i128;
+        } else {
+            opc = INDEX_op_qemu_ld_a64_i128;
+        }
+        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
 
         if (need_bswap) {
             tcg_gen_bswap64_i64(lo, lo);
@@ -500,6 +531,12 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
         canonicalize_memop_i128_as_i64(mop, memop);
         need_bswap = (mop[0] ^ memop) & MO_BSWAP;
 
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            opc = INDEX_op_qemu_ld_a32_i64;
+        } else {
+            opc = INDEX_op_qemu_ld_a64_i64;
+        }
+
         /*
          * Since there are no global TCGv_i128, there is no visible state
          * changed if the second load faults.  Load directly into the two
@@ -513,8 +550,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
             y = TCGV128_LOW(val);
         }
 
-        gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr,
-                     make_memop_idx(mop[0], idx));
+        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
 
         if (need_bswap) {
             tcg_gen_bswap64_i64(x, x);
@@ -530,8 +566,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
             addr_p8 = tcgv_i64_temp(t);
         }
 
-        gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8,
-                     make_memop_idx(mop[1], idx));
+        gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
         tcg_temp_free_internal(addr_p8);
 
         if (need_bswap) {
@@ -564,6 +599,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
 {
     const MemOpIdx orig_oi = make_memop_idx(memop, idx);
     TCGv_i64 ext_addr = NULL;
+    TCGOpcode opc;
 
     tcg_gen_req_mo(TCG_MO_ST_LD | TCG_MO_ST_ST);
 
@@ -586,8 +622,12 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
             hi = TCGV128_HIGH(val);
         }
 
-        gen_ldst(INDEX_op_qemu_st_i128, tcgv_i64_temp(lo),
-                 tcgv_i64_temp(hi), addr, oi);
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            opc = INDEX_op_qemu_st_a32_i128;
+        } else {
+            opc = INDEX_op_qemu_st_a64_i128;
+        }
+        gen_ldst(opc, tcgv_i64_temp(lo), tcgv_i64_temp(hi), addr, oi);
 
         if (need_bswap) {
             tcg_temp_free_i64(lo);
@@ -600,6 +640,12 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
 
         canonicalize_memop_i128_as_i64(mop, memop);
 
+        if (tcg_ctx->addr_type == TCG_TYPE_I32) {
+            opc = INDEX_op_qemu_st_a32_i64;
+        } else {
+            opc = INDEX_op_qemu_st_a64_i64;
+        }
+
         if ((memop & MO_BSWAP) == MO_LE) {
             x = TCGV128_LOW(val);
             y = TCGV128_HIGH(val);
@@ -613,8 +659,8 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
             tcg_gen_bswap64_i64(b, x);
             x = b;
         }
-        gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr,
-                     make_memop_idx(mop[0], idx));
+
+        gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
 
         if (tcg_ctx->addr_type == TCG_TYPE_I32) {
             TCGv_i32 t = tcg_temp_ebb_new_i32();
@@ -628,13 +674,10 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
 
         if (b) {
             tcg_gen_bswap64_i64(b, y);
-            y = b;
-        }
-        gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8,
-                     make_memop_idx(mop[1], idx));
-
-        if (b) {
+            gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
             tcg_temp_free_i64(b);
+        } else {
+            gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
         }
         tcg_temp_free_internal(addr_p8);
     } else {