author | Richard Henderson <richard.henderson@linaro.org> | 2021-01-29 22:52:12 -1000
committer | Richard Henderson <richard.henderson@linaro.org> | 2021-03-17 07:24:44 -0600
commit | 63041ed25fac24b9b0271c6cbf5062f37a0e8c74 (patch)
tree | e5bbfae92217e0ab6a2442334631dc230c1ade61
parent | 00e338faa0cc26c861e2d7b1b5116f2c76574045 (diff)
tcg/tci: Split out tci_args_{rrm,rrrm,rrrrm}
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
-rw-r--r-- | tcg/tci.c | 147
1 file changed, 81 insertions, 66 deletions
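The patch replaces the old tci_read_rval()/tci_read_ulong() readers with per-format argument decoders, one per opcode layout, where a trailing "m" in the name stands for a TCGMemOpIdx immediate. The sketch below illustrates the "rrm" decode pattern in isolation: two register indices followed by a 32-bit memop index, read sequentially from the bytecode stream. The TCGReg/TCGMemOpIdx typedefs and the byte layout here are simplified stand-ins for the example, not the exact QEMU definitions.

```c
/*
 * Minimal, self-contained sketch of the "rrm" decode pattern:
 * two register indices followed by a 32-bit TCGMemOpIdx, pulled
 * sequentially from the bytecode stream.  Types and encoding are
 * illustrative stand-ins, not the real tcg/tci.c definitions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t  TCGReg;        /* stand-in: register index fits in one byte */
typedef uint32_t TCGMemOpIdx;   /* stand-in: packed memop + mmu index */

static uint8_t tci_read_b(const uint8_t **tb_ptr)
{
    return *(*tb_ptr)++;
}

static uint32_t tci_read_i32(const uint8_t **tb_ptr)
{
    uint32_t value;
    memcpy(&value, *tb_ptr, sizeof(value));
    *tb_ptr += sizeof(value);
    return value;
}

/* Decode: register, register, memop index. */
static void tci_args_rrm(const uint8_t **tb_ptr,
                         TCGReg *r0, TCGReg *r1, TCGMemOpIdx *m2)
{
    *r0 = tci_read_b(tb_ptr);
    *r1 = tci_read_b(tb_ptr);
    *m2 = tci_read_i32(tb_ptr);
}

int main(void)
{
    /* r0 = 3, r1 = 7, oi = 0x11223344 encoded little-endian in the stream */
    uint8_t stream[] = { 3, 7, 0x44, 0x33, 0x22, 0x11 };
    const uint8_t *tb_ptr = stream;
    TCGReg r0, r1;
    TCGMemOpIdx oi;

    tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
    printf("r0=%u r1=%u oi=0x%08x\n", r0, r1, oi);
    return 0;
}
```

Decoding every operand through such per-format helpers keeps the interpreter's case bodies free of raw tb_ptr arithmetic, which is the point of the split.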
@@ -66,22 +66,18 @@ tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
     regs[index] = value;
 }
 
-#if TCG_TARGET_REG_BITS == 32
 static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
                             uint32_t low_index, uint64_t value)
 {
     tci_write_reg(regs, low_index, value);
     tci_write_reg(regs, high_index, value >> 32);
 }
-#endif
 
-#if TCG_TARGET_REG_BITS == 32
 /* Create a 64 bit value from two 32 bit values. */
 static uint64_t tci_uint64(uint32_t high, uint32_t low)
 {
     return ((uint64_t)high << 32) + low;
 }
-#endif
 
 /* Read constant byte from bytecode. */
 static uint8_t tci_read_b(const uint8_t **tb_ptr)
@@ -121,43 +117,6 @@ static int32_t tci_read_s32(const uint8_t **tb_ptr)
     return value;
 }
 
-/* Read indexed register (native size) from bytecode. */
-static tcg_target_ulong
-tci_read_rval(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
-{
-    tcg_target_ulong value = tci_read_reg(regs, **tb_ptr);
-    *tb_ptr += 1;
-    return value;
-}
-
-#if TCG_TARGET_REG_BITS == 32
-/* Read two indexed registers (2 * 32 bit) from bytecode. */
-static uint64_t tci_read_r64(const tcg_target_ulong *regs,
-                             const uint8_t **tb_ptr)
-{
-    uint32_t low = tci_read_rval(regs, tb_ptr);
-    return tci_uint64(tci_read_rval(regs, tb_ptr), low);
-}
-#elif TCG_TARGET_REG_BITS == 64
-/* Read indexed register (64 bit) from bytecode. */
-static uint64_t tci_read_r64(const tcg_target_ulong *regs,
-                             const uint8_t **tb_ptr)
-{
-    return tci_read_rval(regs, tb_ptr);
-}
-#endif
-
-/* Read indexed register(s) with target address from bytecode. */
-static target_ulong
-tci_read_ulong(const tcg_target_ulong *regs, const uint8_t **tb_ptr)
-{
-    target_ulong taddr = tci_read_rval(regs, tb_ptr);
-#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
-    taddr += (uint64_t)tci_read_rval(regs, tb_ptr) << 32;
-#endif
-    return taddr;
-}
-
 static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
 {
     return tci_read_i(tb_ptr);
@@ -173,6 +132,7 @@ static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
  * i = immediate (uint32_t)
  * I = immediate (tcg_target_ulong)
  * l = label or pointer
+ * m = immediate (TCGMemOpIdx)
  * r = register
  * s = signed ldst offset
  */
@@ -205,6 +165,14 @@ static void tci_args_rI(const uint8_t **tb_ptr,
 }
 #endif
 
+static void tci_args_rrm(const uint8_t **tb_ptr,
+                         TCGReg *r0, TCGReg *r1, TCGMemOpIdx *m2)
+{
+    *r0 = tci_read_r(tb_ptr);
+    *r1 = tci_read_r(tb_ptr);
+    *m2 = tci_read_i32(tb_ptr);
+}
+
 static void tci_args_rrr(const uint8_t **tb_ptr,
                          TCGReg *r0, TCGReg *r1, TCGReg *r2)
 {
@@ -239,6 +207,15 @@ static void tci_args_rrrc(const uint8_t **tb_ptr,
     *c3 = tci_read_b(tb_ptr);
 }
 
+static void tci_args_rrrm(const uint8_t **tb_ptr,
+                          TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3)
+{
+    *r0 = tci_read_r(tb_ptr);
+    *r1 = tci_read_r(tb_ptr);
+    *r2 = tci_read_r(tb_ptr);
+    *m3 = tci_read_i32(tb_ptr);
+}
+
 static void tci_args_rrrbb(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
                            TCGReg *r2, uint8_t *i3, uint8_t *i4)
 {
@@ -249,6 +226,16 @@ static void tci_args_rrrbb(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
     *i4 = tci_read_b(tb_ptr);
 }
 
+static void tci_args_rrrrm(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
+                           TCGReg *r2, TCGReg *r3, TCGMemOpIdx *m4)
+{
+    *r0 = tci_read_r(tb_ptr);
+    *r1 = tci_read_r(tb_ptr);
+    *r2 = tci_read_r(tb_ptr);
+    *r3 = tci_read_r(tb_ptr);
+    *m4 = tci_read_i32(tb_ptr);
+}
+
 #if TCG_TARGET_REG_BITS == 32
 static void tci_args_rrrr(const uint8_t **tb_ptr,
                           TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
@@ -442,8 +429,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         uint8_t op_size = tb_ptr[1];
         const uint8_t *old_code_ptr = tb_ptr;
 #endif
-        TCGReg r0, r1, r2;
-        tcg_target_ulong t0;
+        TCGReg r0, r1, r2, r3;
         tcg_target_ulong t1;
         TCGCond condition;
         target_ulong taddr;
@@ -451,7 +437,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
         uint32_t tmp32;
         uint64_t tmp64;
 #if TCG_TARGET_REG_BITS == 32
-        TCGReg r3, r4, r5;
+        TCGReg r4, r5;
         uint64_t T1, T2;
 #endif
         TCGMemOpIdx oi;
@@ -838,9 +824,13 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             continue;
 
         case INDEX_op_qemu_ld_i32:
-            t0 = *tb_ptr++;
-            taddr = tci_read_ulong(regs, &tb_ptr);
-            oi = tci_read_i32(&tb_ptr);
+            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+                tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+                taddr = regs[r1];
+            } else {
+                tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+                taddr = tci_uint64(regs[r2], regs[r1]);
+            }
             switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
             case MO_UB:
                 tmp32 = qemu_ld_ub;
@@ -869,15 +859,20 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             default:
                 g_assert_not_reached();
             }
-            tci_write_reg(regs, t0, tmp32);
+            regs[r0] = tmp32;
             break;
+
         case INDEX_op_qemu_ld_i64:
-            t0 = *tb_ptr++;
-            if (TCG_TARGET_REG_BITS == 32) {
-                t1 = *tb_ptr++;
+            if (TCG_TARGET_REG_BITS == 64) {
+                tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+                taddr = regs[r1];
+            } else if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+                tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+                taddr = regs[r2];
+            } else {
+                tci_args_rrrrm(&tb_ptr, &r0, &r1, &r2, &r3, &oi);
+                taddr = tci_uint64(regs[r3], regs[r2]);
             }
-            taddr = tci_read_ulong(regs, &tb_ptr);
-            oi = tci_read_i32(&tb_ptr);
             switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
             case MO_UB:
                 tmp64 = qemu_ld_ub;
@@ -918,39 +913,58 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
             default:
                 g_assert_not_reached();
             }
-            tci_write_reg(regs, t0, tmp64);
             if (TCG_TARGET_REG_BITS == 32) {
-                tci_write_reg(regs, t1, tmp64 >> 32);
+                tci_write_reg64(regs, r1, r0, tmp64);
+            } else {
+                regs[r0] = tmp64;
             }
             break;
+
         case INDEX_op_qemu_st_i32:
-            t0 = tci_read_rval(regs, &tb_ptr);
-            taddr = tci_read_ulong(regs, &tb_ptr);
-            oi = tci_read_i32(&tb_ptr);
+            if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+                tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+                taddr = regs[r1];
+            } else {
+                tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+                taddr = tci_uint64(regs[r2], regs[r1]);
+            }
+            tmp32 = regs[r0];
             switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
             case MO_UB:
-                qemu_st_b(t0);
+                qemu_st_b(tmp32);
                 break;
             case MO_LEUW:
-                qemu_st_lew(t0);
+                qemu_st_lew(tmp32);
                 break;
             case MO_LEUL:
-                qemu_st_lel(t0);
+                qemu_st_lel(tmp32);
                 break;
             case MO_BEUW:
-                qemu_st_bew(t0);
+                qemu_st_bew(tmp32);
                 break;
             case MO_BEUL:
-                qemu_st_bel(t0);
+                qemu_st_bel(tmp32);
                 break;
             default:
                 g_assert_not_reached();
             }
             break;
+
         case INDEX_op_qemu_st_i64:
-            tmp64 = tci_read_r64(regs, &tb_ptr);
-            taddr = tci_read_ulong(regs, &tb_ptr);
-            oi = tci_read_i32(&tb_ptr);
+            if (TCG_TARGET_REG_BITS == 64) {
+                tci_args_rrm(&tb_ptr, &r0, &r1, &oi);
+                taddr = regs[r1];
+                tmp64 = regs[r0];
+            } else {
+                if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
+                    tci_args_rrrm(&tb_ptr, &r0, &r1, &r2, &oi);
+                    taddr = regs[r2];
+                } else {
+                    tci_args_rrrrm(&tb_ptr, &r0, &r1, &r2, &r3, &oi);
+                    taddr = tci_uint64(regs[r3], regs[r2]);
+                }
+                tmp64 = tci_uint64(regs[r1], regs[r0]);
+            }
             switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) {
             case MO_UB:
                 qemu_st_b(tmp64);
@@ -977,6 +991,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
                 g_assert_not_reached();
             }
             break;
+
         case INDEX_op_mb:
             /* Ensure ordering for all kinds */
             smp_mb();
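On a 32-bit host a 64-bit guest address arrives in two 32-bit registers, which the qemu_ld/qemu_st cases above reassemble with tci_uint64() after the "rrrm"/"rrrrm" decode. A minimal sketch of that reassembly follows; the register file and the index values are made up for the example, only tci_uint64() mirrors the helper kept in tcg/tci.c.

```c
/*
 * Sketch: rebuilding a 64-bit guest address from two 32-bit registers,
 * as done when TARGET_LONG_BITS > TCG_TARGET_REG_BITS.  The regs[] array
 * and indices are illustrative, not the interpreter's real state.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static uint64_t tci_uint64(uint32_t high, uint32_t low)
{
    return ((uint64_t)high << 32) + low;
}

int main(void)
{
    uint32_t regs[8] = { 0 };

    /* Pretend tci_args_rrrm() decoded r1 = 1 (low half) and r2 = 2 (high half). */
    regs[1] = 0x89abcdef;   /* low 32 bits of the guest address */
    regs[2] = 0x01234567;   /* high 32 bits of the guest address */

    uint64_t taddr = tci_uint64(regs[2], regs[1]);
    assert(taddr == 0x0123456789abcdefULL);
    printf("taddr = 0x%016llx\n", (unsigned long long)taddr);
    return 0;
}
```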