author     Richard Henderson <richard.henderson@linaro.org>   2023-04-05 22:27:03 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2023-05-02 13:05:45 -0700
commit     129f1f9ee7df77d367d961b3c25353612d33cd83 (patch)
tree       d1aa3082a2b2e1fbcdac6446643974e8a2ec0e1e /tcg/arm
parent     c6a98619f7179a3564008ea9c9b3672821a6812f (diff)
tcg: Introduce tcg_out_movext2
This is common code in most qemu_{ld,st} slow paths: moving two
registers when there may be overlap between sources and destinations
(the case analysis is sketched below). At present this is only used
by 32-bit hosts for 64-bit data, but it will shortly be used for more
than that.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
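
The overlap problem being factored out is easiest to see in isolation. Below is a minimal, self-contained sketch of the same case analysis the removed arm code performed and that tcg_out_movext2 now centralizes: copy one register pair to another when the destination pair may alias the source pair, touching a scratch register only for the full swap. The names move_pair, move_reg, TMP, and the toy register file are hypothetical stand-ins for tcg_out_mov_reg and TCG_REG_TMP, not QEMU's API.

#include <assert.h>
#include <stdio.h>

/* Hypothetical scratch register, standing in for TCG_REG_TMP. */
enum { TMP = 99 };

static int regs[100];   /* toy register file, indexed by register number */

/* Stand-in for tcg_out_mov_reg(): copy register src into register dst. */
static void move_reg(int dst, int src)
{
    regs[dst] = regs[src];
}

/*
 * Move src_lo:src_hi into dst_lo:dst_hi when the pairs may overlap.
 * Mirrors the case analysis removed from tcg_out_qemu_ld_slow_path():
 * pick an order that never clobbers a still-needed source, and fall
 * back to the scratch register only for the full swap.
 */
static void move_pair(int dst_lo, int dst_hi, int src_lo, int src_hi)
{
    if (dst_lo != src_hi) {
        /* Writing dst_lo cannot clobber src_hi: move the low part first. */
        move_reg(dst_lo, src_lo);
        move_reg(dst_hi, src_hi);
    } else if (dst_hi != src_lo) {
        /* Writing dst_hi cannot clobber src_lo: move the high part first. */
        move_reg(dst_hi, src_hi);
        move_reg(dst_lo, src_lo);
    } else {
        /* Full swap (dst_lo == src_hi && dst_hi == src_lo): use scratch. */
        move_reg(TMP, src_lo);
        move_reg(dst_hi, src_hi);
        move_reg(dst_lo, TMP);
    }
}

int main(void)
{
    /* Registers 0 and 1 hold the result pair, as R0:R1 does after the call. */
    regs[0] = 0x1111;   /* low word */
    regs[1] = 0x2222;   /* high word */

    /* Worst case: the destination pair is the source pair swapped. */
    move_pair(1, 0, 0, 1);
    assert(regs[1] == 0x1111 && regs[0] == 0x2222);
    printf("swap ok: lo=%#x hi=%#x\n", regs[1], regs[0]);
    return 0;
}

Only the full-swap case needs the scratch, which is presumably why tcg_out_movext2 takes the scratch register as an explicit argument (TCG_REG_TMP at both call sites in this patch).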
Diffstat (limited to 'tcg/arm')
-rw-r--r--   tcg/arm/tcg-target.c.inc | 44
1 file changed, 18 insertions(+), 26 deletions(-)
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index 8d769ca..83c818a 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -1545,7 +1545,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
 
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGReg argreg, datalo, datahi;
+    TCGReg argreg;
     MemOpIdx oi = lb->oi;
     MemOp opc = get_memop(oi);
 
@@ -1565,22 +1565,16 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     /* Use the canonical unsigned helpers and minimize icache usage. */
     tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE]);
 
-    datalo = lb->datalo_reg;
-    datahi = lb->datahi_reg;
     if ((opc & MO_SIZE) == MO_64) {
-        if (datalo != TCG_REG_R1) {
-            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
-            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
-        } else if (datahi != TCG_REG_R0) {
-            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
-            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_R0);
-        } else {
-            tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
-            tcg_out_mov_reg(s, COND_AL, datahi, TCG_REG_R1);
-            tcg_out_mov_reg(s, COND_AL, datalo, TCG_REG_TMP);
-        }
+        TCGMovExtend ext[2] = {
+            { .dst = lb->datalo_reg, .dst_type = TCG_TYPE_I32,
+              .src = TCG_REG_R0, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
+            { .dst = lb->datahi_reg, .dst_type = TCG_TYPE_I32,
+              .src = TCG_REG_R1, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
+        };
+        tcg_out_movext2(s, &ext[0], &ext[1], TCG_REG_TMP);
     } else {
-        tcg_out_movext(s, TCG_TYPE_I32, datalo,
+        tcg_out_movext(s, TCG_TYPE_I32, lb->datalo_reg,
                        TCG_TYPE_I32, opc & MO_SSIZE, TCG_REG_R0);
     }
 
@@ -1663,17 +1657,15 @@ static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
 
     if (TARGET_LONG_BITS == 64) {
         /* 64-bit target address is aligned into R2:R3. */
-        if (l->addrhi_reg != TCG_REG_R2) {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
-        } else if (l->addrlo_reg != TCG_REG_R3) {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, l->addrhi_reg);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, l->addrlo_reg);
-        } else {
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, TCG_REG_R2);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R2, TCG_REG_R3);
-            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R3, TCG_REG_R1);
-        }
+        TCGMovExtend ext[2] = {
+            { .dst = TCG_REG_R2, .dst_type = TCG_TYPE_I32,
+              .src = l->addrlo_reg,
+              .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
+            { .dst = TCG_REG_R3, .dst_type = TCG_TYPE_I32,
+              .src = l->addrhi_reg,
+              .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
+        };
+        tcg_out_movext2(s, &ext[0], &ext[1], TCG_REG_TMP);
     } else {
         tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R1, l->addrlo_reg);
     }