author     Peter Maydell <peter.maydell@linaro.org>    2023-03-14 10:09:15 +0000
committer  Peter Maydell <peter.maydell@linaro.org>    2023-03-14 10:09:15 +0000
commit     27a03171d02ee0de8de4e2d3bed241795d672859 (patch)
tree       4a3aa9eb1e3aed0e96286357e8563a15fcb1f4fa /target/s390x
parent     5cfda4ce79dd455f1726874a555260a70f84b2ec (diff)
parent     0c8b6b9a6383e2e37ff3d1d12b40c58b7ed36c1c (diff)
Merge tag 'pull-tcg-20230313' of https://gitlab.com/rth7680/qemu into staging
accel/tcg: Fix NB_MMU_MODES to 16
Balance of the target/ patchset which eliminates tcg_temp_free
Balance of the target/ patchset which eliminates tcg_const

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmQPcb0dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV885AgAjDbg1soRBy0THf0X
# CVXmQ4yYyUKAonZBL8Abt9yX01BhLFqEsrju3HiaLNOM9DbwWQ4gdvSrtAZ/K2YG
# d6EvC+rJe79pr58MEEhqO4OO1ymp52amRHtEXva4vcKRNuM9WF5by/Hz2PsZyenG
# ysaLBdddooA9SJeL7xYBMpKWFgUm3C8NzfaRfCBVcG94er9u8RUi0kx+drmOLw0g
# vZ3Hekvi2I8Y5mWqvHeAIOsr8Md9PO3ezWxEteE4qsPNTTRfVD93oSGe9nNCYZTX
# wWU51Vfv9GB6hOylAfMRIeCmkjks/gqLOGElsh1MaVovNDTXS5IKV/HgaLaocJHV
# 2P81uQ==
# =FpIY
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 13 Mar 2023 18:55:57 GMT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20230313' of https://gitlab.com/rth7680/qemu: (91 commits)
  tcg: Drop tcg_const_*
  tcg: Drop tcg_const_*_vec
  target/tricore: Use min/max for saturate
  target/ppc: Avoid tcg_const_* in translate.c
  target/ppc: Fix gen_tlbsx_booke206
  target/ppc: Rewrite trans_ADDG6S
  target/ppc: Avoid tcg_const_* in power8-pmu-regs.c.inc
  target/ppc: Avoid tcg_const_* in fp-impl.c.inc
  target/ppc: Avoid tcg_const_* in vsx-impl.c.inc
  target/ppc: Avoid tcg_const_* in xxeval
  target/ppc: Avoid tcg_const_* in vmx-impl.c.inc
  target/ppc: Avoid tcg_const_i64 in do_vcntmb
  target/m68k: Use tcg_constant_i32 in gen_ea_mode
  target/arm: Avoid tcg_const_ptr in handle_rev
  target/arm: Avoid tcg_const_ptr in handle_vec_simd_sqshrn
  target/arm: Avoid tcg_const_ptr in disas_simd_zip_trn
  target/arm: Avoid tcg_const_* in translate-mve.c
  target/arm: Avoid tcg_const_ptr in gen_sve_{ldr,str}
  target/arm: Improve trans_BFCI
  target/arm: Create gen_set_rmode, gen_restore_rmode
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/s390x')
-rw-r--r--    target/s390x/cpu-param.h                 1
-rw-r--r--    target/s390x/tcg/translate.c           208
-rw-r--r--    target/s390x/tcg/translate_vx.c.inc    143
3 files changed, 21 insertions, 331 deletions
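
The conversions in the diff below all follow one pattern: tcg_const_i64() returned a fresh, writable temporary initialized to a value, which the caller then had to release, while tcg_constant_i64() returns a read-only, pooled constant that is never written to and never freed. The explicit tcg_temp_free_*() calls disappear for the same reason: temporaries allocated during translation are now reclaimed automatically at the end of each translated instruction. A minimal before/after sketch follows; the helper names are illustrative only and are not taken from this patch.

/* Before: a writable temp seeded with a constant, freed by hand. */
static void gen_and_mask_old(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 t = tcg_const_i64(0x3f);
    tcg_gen_and_i64(out, in, t);
    tcg_temp_free_i64(t);                 /* mandatory with the old API */
}

/* After: a read-only constant; scratch temps need no explicit free. */
static void gen_and_mask_new(TCGv_i64 out, TCGv_i64 in)
{
    TCGv_i64 c = tcg_constant_i64(0x3f);  /* must never be written or freed */
    tcg_gen_and_i64(out, in, c);
}
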
diff --git a/target/s390x/cpu-param.h b/target/s390x/cpu-param.h
index bf951a0..84ca086 100644
--- a/target/s390x/cpu-param.h
+++ b/target/s390x/cpu-param.h
@@ -12,6 +12,5 @@
#define TARGET_PAGE_BITS 12
#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64
-#define NB_MMU_MODES 4
#endif
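
The one-line cpu-param.h removal above pairs with the "accel/tcg: Fix NB_MMU_MODES to 16" change named in the merge message: targets stop advertising their own MMU-mode count, and the common TCG code instead assumes a fixed upper bound of 16. A hedged sketch of the shape this takes on the common side; the structure name below is an assumption for illustration and does not appear in this diff.

#include <stdint.h>

#define NB_MMU_MODES 16            /* fixed maximum, no longer per target */

/* With the bound fixed, core softmmu structures can be sized once for
 * all targets, roughly like this (hypothetical type name): */
typedef struct SoftTLBSketch {
    uintptr_t fast_table[NB_MMU_MODES];   /* one fast-path table per MMU mode */
} SoftTLBSketch;
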
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index d324c0b..14c3896 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -156,8 +156,6 @@ struct DisasContext {
typedef struct {
TCGCond cond:8;
bool is_64;
- bool g1;
- bool g2;
union {
struct { TCGv_i64 a, b; } s64;
struct { TCGv_i32 a, b; } s32;
@@ -308,8 +306,6 @@ static TCGv_i128 load_freg_128(int reg)
TCGv_i128 r = tcg_temp_new_i128();
tcg_gen_concat_i64_i128(r, l, h);
- tcg_temp_free_i64(h);
- tcg_temp_free_i64(l);
return r;
}
@@ -722,7 +718,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
c->u.s32.a = cc_op;
c->u.s32.b = cc_op;
- c->g1 = c->g2 = true;
c->is_64 = false;
return;
}
@@ -839,7 +834,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
/* Load up the arguments of the comparison. */
c->is_64 = true;
- c->g1 = c->g2 = false;
switch (old_cc_op) {
case CC_OP_LTGT0_32:
c->is_64 = false;
@@ -861,13 +855,11 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_FLOGR:
c->u.s64.a = cc_dst;
c->u.s64.b = tcg_constant_i64(0);
- c->g1 = true;
break;
case CC_OP_LTGT_64:
case CC_OP_LTUGTU_64:
c->u.s64.a = cc_src;
c->u.s64.b = cc_dst;
- c->g1 = c->g2 = true;
break;
case CC_OP_TM_32:
@@ -882,7 +874,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_SUBU:
c->is_64 = true;
c->u.s64.b = tcg_constant_i64(0);
- c->g1 = true;
switch (mask) {
case 8 | 2:
case 4 | 1: /* result */
@@ -900,7 +891,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_STATIC:
c->is_64 = false;
c->u.s32.a = cc_op;
- c->g1 = true;
switch (mask) {
case 0x8 | 0x4 | 0x2: /* cc != 3 */
cond = TCG_COND_NE;
@@ -916,7 +906,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
break;
case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
cond = TCG_COND_EQ;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
@@ -935,7 +924,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
break;
case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
cond = TCG_COND_NE;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
@@ -959,7 +947,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
default:
/* CC is masked by something else: (8 >> cc) & mask. */
cond = TCG_COND_NE;
- c->g1 = false;
c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
@@ -974,24 +961,6 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
c->cond = cond;
}
-static void free_compare(DisasCompare *c)
-{
- if (!c->g1) {
- if (c->is_64) {
- tcg_temp_free_i64(c->u.s64.a);
- } else {
- tcg_temp_free_i32(c->u.s32.a);
- }
- }
- if (!c->g2) {
- if (c->is_64) {
- tcg_temp_free_i64(c->u.s64.b);
- } else {
- tcg_temp_free_i32(c->u.s32.b);
- }
- }
-}
-
/* ====================================================================== */
/* Define the insn format enumeration. */
#define F0(N) FMT_##N,
@@ -1092,7 +1061,6 @@ static const DisasFormatInfo format_info[] = {
them, and store them back. See the "in1", "in2", "prep", "wout" sets
of routines below for more details. */
typedef struct {
- bool g_out, g_out2, g_in1, g_in2;
TCGv_i64 out, out2, in1, in2;
TCGv_i64 addr1;
TCGv_i128 out_128, in1_128, in2_128;
@@ -1292,17 +1260,14 @@ static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
TCGv_i64 z = tcg_constant_i64(0);
tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
tcg_gen_extu_i32_i64(t1, t0);
- tcg_temp_free_i32(t0);
tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
per_branch_cond(s, TCG_COND_NE, t1, z);
- tcg_temp_free_i64(t1);
}
ret = DISAS_PC_UPDATED;
}
egress:
- free_compare(c);
return ret;
}
@@ -1462,11 +1427,11 @@ static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_ori_i64(o->in2, o->in2, ~mask);
- tcg_gen_and_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_ori_i64(t, t, ~mask);
+ tcg_gen_and_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -1555,7 +1520,6 @@ static void save_link_info(DisasContext *s, DisasOps *o)
tcg_gen_extu_i32_i64(t, cc_op);
tcg_gen_shli_i64(t, t, 28);
tcg_gen_or_i64(o->out, o->out, t);
- tcg_temp_free_i64(t);
}
static DisasJumpType op_bal(DisasContext *s, DisasOps *o)
@@ -1612,8 +1576,6 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
c.cond = TCG_COND_NE;
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_subi_i64(t, regs[r1], 1);
@@ -1621,7 +1583,6 @@ static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1635,8 +1596,6 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
c.cond = TCG_COND_NE;
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, regs[r1], 32);
@@ -1645,7 +1604,6 @@ static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
c.u.s32.a = tcg_temp_new_i32();
c.u.s32.b = tcg_constant_i32(0);
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, 1, imm, o->in2);
}
@@ -1659,8 +1617,6 @@ static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
c.cond = TCG_COND_NE;
c.is_64 = true;
- c.g1 = true;
- c.g2 = false;
tcg_gen_subi_i64(regs[r1], regs[r1], 1);
c.u.s64.a = regs[r1];
@@ -1680,8 +1636,6 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
c.is_64 = false;
- c.g1 = false;
- c.g2 = false;
t = tcg_temp_new_i64();
tcg_gen_add_i64(t, regs[r1], regs[r3]);
@@ -1690,7 +1644,6 @@ static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
tcg_gen_extrl_i64_i32(c.u.s32.a, t);
tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
store_reg32_i64(r1, t);
- tcg_temp_free_i64(t);
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1708,15 +1661,12 @@ static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
if (r1 == (r3 | 1)) {
c.u.s64.b = load_reg(r3 | 1);
- c.g2 = false;
} else {
c.u.s64.b = regs[r3 | 1];
- c.g2 = true;
}
tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
c.u.s64.a = regs[r1];
- c.g1 = true;
return help_branch(s, &c, is_imm, imm, o->in2);
}
@@ -1731,7 +1681,7 @@ static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
if (s->insn->data) {
c.cond = tcg_unsigned_cond(c.cond);
}
- c.is_64 = c.g1 = c.g2 = true;
+ c.is_64 = true;
c.u.s64.a = o->in1;
c.u.s64.b = o->in2;
@@ -2012,11 +1962,9 @@ static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
gen_helper_cksm(pair, cpu_env, o->in1, o->in2, regs[r2 + 1]);
set_cc_static(s);
tcg_gen_extr_i128_i64(o->out, len, pair);
- tcg_temp_free_i128(pair);
tcg_gen_add_i64(regs[r2], regs[r2], len);
tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
- tcg_temp_free_i64(len);
return DISAS_NEXT;
}
@@ -2118,7 +2066,6 @@ static DisasJumpType op_clm(DisasContext *s, DisasOps *o)
tcg_gen_extrl_i64_i32(t1, o->in1);
gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
set_cc_static(s);
- tcg_temp_free_i32(t1);
return DISAS_NEXT;
}
@@ -2128,7 +2075,6 @@ static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
gen_helper_clst(pair, cpu_env, regs[0], o->in1, o->in2);
tcg_gen_extr_i128_i64(o->in2, o->in1, pair);
- tcg_temp_free_i128(pair);
set_cc_static(s);
return DISAS_NEXT;
@@ -2140,7 +2086,6 @@ static DisasJumpType op_cps(DisasContext *s, DisasOps *o)
tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull);
tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
tcg_gen_or_i64(o->out, o->out, t);
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -2156,14 +2101,12 @@ static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
addr = get_address(s, 0, b2, d2);
tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
get_mem_index(s), s->insn->data | MO_ALIGN);
- tcg_temp_free_i64(addr);
/* Are the memory and expected values (un)equal? Note that this setcond
produces the output CC value, thus the NE sense of the test. */
cc = tcg_temp_new_i64();
tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
tcg_gen_extrl_i64_i32(cc_op, cc);
- tcg_temp_free_i64(cc);
set_cc_static(s);
return DISAS_NEXT;
@@ -2223,7 +2166,6 @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
get_mem_index(s), mop | MO_ALIGN);
- tcg_temp_free_i64(addr);
/* Are the memory and expected values (un)equal? */
cc = tcg_temp_new_i64();
@@ -2237,14 +2179,12 @@ static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
} else {
tcg_gen_mov_i64(o->out, old);
}
- tcg_temp_free_i64(old);
/* If the comparison was equal, and the LSB of R2 was set,
then we need to flush the TLB (for all cpus). */
tcg_gen_xori_i64(cc, cc, 1);
tcg_gen_and_i64(cc, cc, o->in2);
tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
- tcg_temp_free_i64(cc);
gen_helper_purge(cpu_env);
gen_set_label(lab);
@@ -2259,9 +2199,7 @@ static DisasJumpType op_cvd(DisasContext *s, DisasOps *o)
TCGv_i32 t2 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t2, o->in1);
gen_helper_cvd(t1, t2);
- tcg_temp_free_i32(t2);
tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -2363,7 +2301,6 @@ static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
gen_helper_divs64(t, cpu_env, o->in1, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, t);
- tcg_temp_free_i128(t);
return DISAS_NEXT;
}
@@ -2373,7 +2310,6 @@ static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
gen_helper_divu64(t, cpu_env, o->out, o->out2, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, t);
- tcg_temp_free_i128(t);
return DISAS_NEXT;
}
@@ -2428,8 +2364,6 @@ static DisasJumpType op_epsw(DisasContext *s, DisasOps *o)
if (r2 != 0) {
store_reg32_i64(r2, psw_mask);
}
-
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -2569,7 +2503,6 @@ static DisasJumpType op_icm(DisasContext *s, DisasOps *o)
tcg_gen_movi_i64(tmp, ccm);
gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -2592,8 +2525,6 @@ static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
tcg_gen_extu_i32_i64(t2, cc_op);
tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
return DISAS_NEXT;
}
@@ -2925,21 +2856,17 @@ static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
if (c.is_64) {
tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
o->in2, o->in1);
- free_compare(&c);
} else {
TCGv_i32 t32 = tcg_temp_new_i32();
TCGv_i64 t, z;
tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
- free_compare(&c);
t = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t, t32);
- tcg_temp_free_i32(t32);
z = tcg_constant_i64(0);
tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
- tcg_temp_free_i64(t);
}
return DISAS_NEXT;
@@ -2996,8 +2923,6 @@ static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
/* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
tcg_gen_shli_i64(t1, t1, 32);
gen_helper_load_psw(cpu_env, t1, t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
return DISAS_NORETURN;
}
@@ -3014,8 +2939,6 @@ static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
gen_helper_load_psw(cpu_env, t1, t2);
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
return DISAS_NORETURN;
}
#endif
@@ -3040,7 +2963,6 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
if (unlikely(r1 == r3)) {
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32_i64(r1, t1);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3055,8 +2977,6 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t2);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3069,9 +2989,6 @@ static DisasJumpType op_lm32(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32_i64(r1, t1);
}
- tcg_temp_free(t2);
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3086,7 +3003,6 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
if (unlikely(r1 == r3)) {
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32h_i64(r1, t1);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3101,8 +3017,6 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t2);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3115,9 +3029,6 @@ static DisasJumpType op_lmh(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s));
store_reg32h_i64(r1, t1);
}
- tcg_temp_free(t2);
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3141,11 +3052,9 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15));
tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s));
tcg_gen_mov_i64(regs[r1], t1);
- tcg_temp_free(t2);
/* Only two registers to read. */
if (((r1 + 1) & 15) == r3) {
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -3157,8 +3066,6 @@ static DisasJumpType op_lm64(DisasContext *s, DisasOps *o)
tcg_gen_add_i64(o->in2, o->in2, t1);
tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s));
}
- tcg_temp_free(t1);
-
return DISAS_NEXT;
}
@@ -3180,8 +3087,6 @@ static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
a2 = get_address(s, 0, get_field(s, b2), get_field(s, d2));
tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
- tcg_temp_free_i64(a1);
- tcg_temp_free_i64(a2);
/* ... and indicate that we performed them while interlocked. */
gen_op_movi_cc(s, 0);
@@ -3253,9 +3158,7 @@ static DisasJumpType op_mc(DisasContext *s, DisasOps *o)
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
o->out = o->in2;
- o->g_out = o->g_in2;
o->in2 = NULL;
- o->g_in2 = false;
return DISAS_NEXT;
}
@@ -3265,9 +3168,7 @@ static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
TCGv ar1 = tcg_temp_new_i64();
o->out = o->in2;
- o->g_out = o->g_in2;
o->in2 = NULL;
- o->g_in2 = false;
switch (s->base.tb->flags & FLAG_MASK_ASC) {
case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT:
@@ -3289,8 +3190,6 @@ static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o)
}
tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1]));
- tcg_temp_free_i64(ar1);
-
return DISAS_NEXT;
}
@@ -3298,11 +3197,8 @@ static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
o->out = o->in1;
o->out2 = o->in2;
- o->g_out = o->g_in1;
- o->g_out2 = o->g_in2;
o->in1 = NULL;
o->in2 = NULL;
- o->g_in1 = o->g_in2 = false;
return DISAS_NEXT;
}
@@ -3509,7 +3405,6 @@ static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3517,7 +3412,6 @@ static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
TCGv_i64 r3 = load_freg(get_field(s, r3));
gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3525,7 +3419,6 @@ static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
TCGv_i64 r3 = load_freg32_i64(get_field(s, r3));
gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3533,7 +3426,6 @@ static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
TCGv_i64 r3 = load_freg(get_field(s, r3));
gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
- tcg_temp_free_i64(r3);
return DISAS_NEXT;
}
@@ -3544,7 +3436,6 @@ static DisasJumpType op_nabs(DisasContext *s, DisasOps *o)
tcg_gen_neg_i64(n, o->in2);
tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2);
- tcg_temp_free_i64(n);
return DISAS_NEXT;
}
@@ -3621,10 +3512,10 @@ static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_or_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_or_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -3809,7 +3700,6 @@ static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o)
/* If this is a test-only form, arrange to discard the result. */
if (i3 & 0x80) {
o->out = tcg_temp_new_i64();
- o->g_out = false;
}
i3 &= 63;
@@ -3879,9 +3769,6 @@ static DisasJumpType op_rll32(DisasContext *s, DisasOps *o)
tcg_gen_extrl_i64_i32(t2, o->in2);
tcg_gen_rotl_i32(to, t1, t2);
tcg_gen_extu_i32_i64(o->out, to);
- tcg_temp_free_i32(t1);
- tcg_temp_free_i32(t2);
- tcg_temp_free_i32(to);
return DISAS_NEXT;
}
@@ -4022,7 +3909,6 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
} else {
tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
}
- free_compare(&c);
r1 = get_field(s, r1);
a = get_address(s, 0, get_field(s, b2), get_field(s, d2));
@@ -4037,12 +3923,10 @@ static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
h = tcg_temp_new_i64();
tcg_gen_shri_i64(h, regs[r1], 32);
tcg_gen_qemu_st32(h, a, get_mem_index(s));
- tcg_temp_free_i64(h);
break;
default:
g_assert_not_reached();
}
- tcg_temp_free_i64(a);
gen_set_label(lab);
return DISAS_NEXT;
@@ -4059,9 +3943,6 @@ static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
t = o->in1;
}
gen_op_update2_cc_i64(s, CC_OP_SLA, t, o->in2);
- if (s->insn->data == 31) {
- tcg_temp_free_i64(t);
- }
tcg_gen_shl_i64(o->out, o->in1, o->in2);
/* The arithmetic left shift is curious in that it does not affect
the sign bit. Copy that over from the source unchanged. */
@@ -4128,8 +4009,6 @@ static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
-
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4170,8 +4049,6 @@ static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
/* store second operand in GR1 */
tcg_gen_mov_i64(regs[1], o->in2);
-
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4231,9 +4108,6 @@ static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
tcg_gen_addi_i64(o->in2, o->in2, 8);
tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
- tcg_temp_free_i64(c1);
- tcg_temp_free_i64(c2);
- tcg_temp_free_i64(todpr);
/* ??? We don't implement clock states. */
gen_op_movi_cc(s, 0);
return DISAS_NEXT;
@@ -4447,7 +4321,6 @@ static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
t = tcg_temp_new_i64();
tcg_gen_shri_i64(t, psw_mask, 56);
tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
- tcg_temp_free_i64(t);
if (s->fields.op == 0xac) {
tcg_gen_andi_i64(psw_mask, psw_mask,
@@ -4558,7 +4431,6 @@ static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
}
break;
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -4602,8 +4474,6 @@ static DisasJumpType op_stmh(DisasContext *s, DisasOps *o)
tcg_gen_add_i64(o->in2, o->in2, t4);
r1 = (r1 + 1) & 15;
}
-
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -4790,7 +4660,6 @@ static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
gen_helper_tre(pair, cpu_env, o->out, o->out2, o->in2);
tcg_gen_extr_i128_i64(o->out2, o->out, pair);
- tcg_temp_free_i128(pair);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4836,7 +4705,6 @@ static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
}
gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);
- tcg_temp_free_i32(tst);
set_cc_static(s);
return DISAS_NEXT;
}
@@ -4955,10 +4823,10 @@ static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
int shift = s->insn->data & 0xff;
int size = s->insn->data >> 8;
uint64_t mask = ((1ull << size) - 1) << shift;
+ TCGv_i64 t = tcg_temp_new_i64();
- assert(!o->g_in2);
- tcg_gen_shli_i64(o->in2, o->in2, shift);
- tcg_gen_xor_i64(o->out, o->in1, o->in2);
+ tcg_gen_shli_i64(t, o->in2, shift);
+ tcg_gen_xor_i64(o->out, o->in1, t);
/* Produce the CC from only the bits manipulated. */
tcg_gen_andi_i64(cc_dst, o->out, mask);
@@ -4989,15 +4857,14 @@ static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
- o->out = tcg_const_i64(0);
+ o->out = tcg_constant_i64(0);
return DISAS_NEXT;
}
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
- o->out = tcg_const_i64(0);
+ o->out = tcg_constant_i64(0);
o->out2 = o->out;
- o->g_out2 = true;
return DISAS_NEXT;
}
@@ -5265,7 +5132,6 @@ static void prep_new_x(DisasContext *s, DisasOps *o)
static void prep_r1(DisasContext *s, DisasOps *o)
{
o->out = regs[get_field(s, r1)];
- o->g_out = true;
}
#define SPEC_prep_r1 0
@@ -5274,7 +5140,6 @@ static void prep_r1_P(DisasContext *s, DisasOps *o)
int r1 = get_field(s, r1);
o->out = regs[r1];
o->out2 = regs[r1 + 1];
- o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even
@@ -5343,7 +5208,6 @@ static void wout_r1_D32(DisasContext *s, DisasOps *o)
store_reg32_i64(r1 + 1, o->out);
tcg_gen_shri_i64(t, o->out, 32);
store_reg32_i64(r1, t);
- tcg_temp_free_i64(t);
}
#define SPEC_wout_r1_D32 SPEC_r1_even
@@ -5499,7 +5363,6 @@ static void in1_r1(DisasContext *s, DisasOps *o)
static void in1_r1_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r1)];
- o->g_in1 = true;
}
#define SPEC_in1_r1_o 0
@@ -5533,7 +5396,6 @@ static void in1_r1p1(DisasContext *s, DisasOps *o)
static void in1_r1p1_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r1) + 1];
- o->g_in1 = true;
}
#define SPEC_in1_r1p1_o SPEC_r1_even
@@ -5588,7 +5450,6 @@ static void in1_r3(DisasContext *s, DisasOps *o)
static void in1_r3_o(DisasContext *s, DisasOps *o)
{
o->in1 = regs[get_field(s, r3)];
- o->g_in1 = true;
}
#define SPEC_in1_r3_o 0
@@ -5719,7 +5580,6 @@ static void in1_m1_64(DisasContext *s, DisasOps *o)
static void in2_r1_o(DisasContext *s, DisasOps *o)
{
o->in2 = regs[get_field(s, r1)];
- o->g_in2 = true;
}
#define SPEC_in2_r1_o 0
@@ -5754,7 +5614,6 @@ static void in2_r2(DisasContext *s, DisasOps *o)
static void in2_r2_o(DisasContext *s, DisasOps *o)
{
o->in2 = regs[get_field(s, r2)];
- o->g_in2 = true;
}
#define SPEC_in2_r2_o 0
@@ -5903,7 +5762,7 @@ static void in2_sh(DisasContext *s, DisasOps *o)
int d2 = get_field(s, d2);
if (b2 == 0) {
- o->in2 = tcg_const_i64(d2 & 0x3f);
+ o->in2 = tcg_constant_i64(d2 & 0x3f);
} else {
o->in2 = get_address(s, 0, b2, d2);
tcg_gen_andi_i64(o->in2, o->in2, 0x3f);
@@ -6016,46 +5875,46 @@ static void in2_mri2_64(DisasContext *s, DisasOps *o)
static void in2_i2(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64(get_field(s, i2));
+ o->in2 = tcg_constant_i64(get_field(s, i2));
}
#define SPEC_in2_i2 0
static void in2_i2_8u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint8_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint8_t)get_field(s, i2));
}
#define SPEC_in2_i2_8u 0
static void in2_i2_16u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint16_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint16_t)get_field(s, i2));
}
#define SPEC_in2_i2_16u 0
static void in2_i2_32u(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64((uint32_t)get_field(s, i2));
+ o->in2 = tcg_constant_i64((uint32_t)get_field(s, i2));
}
#define SPEC_in2_i2_32u 0
static void in2_i2_16u_shl(DisasContext *s, DisasOps *o)
{
uint64_t i2 = (uint16_t)get_field(s, i2);
- o->in2 = tcg_const_i64(i2 << s->insn->data);
+ o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_16u_shl 0
static void in2_i2_32u_shl(DisasContext *s, DisasOps *o)
{
uint64_t i2 = (uint32_t)get_field(s, i2);
- o->in2 = tcg_const_i64(i2 << s->insn->data);
+ o->in2 = tcg_constant_i64(i2 << s->insn->data);
}
#define SPEC_in2_i2_32u_shl 0
#ifndef CONFIG_USER_ONLY
static void in2_insn(DisasContext *s, DisasOps *o)
{
- o->in2 = tcg_const_i64(s->fields.raw_insn);
+ o->in2 = tcg_constant_i64(s->fields.raw_insn);
}
#define SPEC_in2_insn 0
#endif
@@ -6481,31 +6340,6 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
}
}
- /* Free any temporaries created by the helpers. */
- if (o.out && !o.g_out) {
- tcg_temp_free_i64(o.out);
- }
- if (o.out2 && !o.g_out2) {
- tcg_temp_free_i64(o.out2);
- }
- if (o.in1 && !o.g_in1) {
- tcg_temp_free_i64(o.in1);
- }
- if (o.in2 && !o.g_in2) {
- tcg_temp_free_i64(o.in2);
- }
- if (o.addr1) {
- tcg_temp_free_i64(o.addr1);
- }
- if (o.out_128) {
- tcg_temp_free_i128(o.out_128);
- }
- if (o.in1_128) {
- tcg_temp_free_i128(o.in1_128);
- }
- if (o.in2_128) {
- tcg_temp_free_i128(o.in2_128);
- }
/* io should be the last instruction in tb when icount is enabled */
if (unlikely(icount && ret == DISAS_NEXT)) {
ret = DISAS_TOO_MANY;
diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc
index 3fadc82..43dfbfd 100644
--- a/target/s390x/tcg/translate_vx.c.inc
+++ b/target/s390x/tcg/translate_vx.c.inc
@@ -183,8 +183,6 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
/* generate the final ptr by adding cpu_env */
tcg_gen_trunc_i64_ptr(ptr, tmp);
tcg_gen_add_ptr(ptr, ptr, cpu_env);
-
- tcg_temp_free_i64(tmp);
}
#define gen_gvec_2(v1, v2, gen) \
@@ -272,13 +270,6 @@ static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a,
fn(dl, dh, al, ah, bl, bh);
write_vec_element_i64(dh, d, 0, ES_64);
write_vec_element_i64(dl, d, 1, ES_64);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(dl);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(al);
- tcg_temp_free_i64(bh);
- tcg_temp_free_i64(bl);
}
typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
@@ -305,15 +296,6 @@ static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
fn(dl, dh, al, ah, bl, bh, cl, ch);
write_vec_element_i64(dh, d, 0, ES_64);
write_vec_element_i64(dl, d, 1, ES_64);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(dl);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(al);
- tcg_temp_free_i64(bh);
- tcg_temp_free_i64(bl);
- tcg_temp_free_i64(ch);
- tcg_temp_free_i64(cl);
}
static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
@@ -351,7 +333,6 @@ static DisasJumpType op_vge(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -386,7 +367,6 @@ static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
write_vec_element_i64(t, get_field(s, v1), 0, ES_64);
tcg_gen_movi_i64(t, generate_byte_mask(i2));
write_vec_element_i64(t, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(t);
}
return DISAS_NEXT;
}
@@ -427,8 +407,6 @@ static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -451,7 +429,6 @@ static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
gen_gvec_dup_i64(es, get_field(s, v1), tmp);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -469,7 +446,6 @@ static DisasJumpType op_vlebr(DisasContext *s, DisasOps *o)
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -486,7 +462,6 @@ static DisasJumpType op_vlbrrep(DisasContext *s, DisasOps *o)
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
gen_gvec_dup_i64(es, get_field(s, v1), tmp);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -518,7 +493,6 @@ static DisasJumpType op_vllebrz(DisasContext *s, DisasOps *o)
write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
write_vec_element_i64(tcg_constant_i64(0), get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -572,9 +546,6 @@ static DisasJumpType op_vlbr(DisasContext *s, DisasOps *o)
write:
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -592,7 +563,6 @@ static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
tmp = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
write_vec_element_i64(tmp, get_field(s, v1), enr, es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -647,8 +617,6 @@ static DisasJumpType op_vler(DisasContext *s, DisasOps *o)
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -688,8 +656,6 @@ static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
default:
g_assert_not_reached();
}
- tcg_temp_free_ptr(ptr);
-
return DISAS_NEXT;
}
@@ -730,7 +696,6 @@ static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
gen_gvec_dup_imm(es, get_field(s, v1), 0);
write_vec_element_i64(t, get_field(s, v1), enr, es);
- tcg_temp_free_i64(t);
return DISAS_NEXT;
}
@@ -768,9 +733,6 @@ static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
/* Store the last element, loaded first */
write_vec_element_i64(t0, v1, 1, ES_64);
-
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -794,8 +756,6 @@ static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o)
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vll(cpu_env, a0, o->addr1, bytes);
- tcg_temp_free_i64(bytes);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -835,8 +795,6 @@ static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o)
default:
g_assert_not_reached();
}
- tcg_temp_free_ptr(ptr);
-
return DISAS_NEXT;
}
@@ -856,7 +814,6 @@ static DisasJumpType op_vll(DisasContext *s, DisasOps *o)
tcg_gen_addi_i64(o->in2, o->in2, 1);
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -898,7 +855,6 @@ static DisasJumpType op_vmr(DisasContext *s, DisasOps *o)
write_vec_element_i64(tmp, v1, dst_idx, es);
}
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -974,7 +930,6 @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o)
}
write_vec_element_i64(tmp, v1, dst_idx, dst_es);
}
- tcg_temp_free_i64(tmp);
} else {
gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
}
@@ -1004,8 +959,6 @@ static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o)
read_vec_element_i64(t1, get_field(s, v3), i3, ES_64);
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
return DISAS_NEXT;
}
@@ -1057,7 +1010,6 @@ static DisasJumpType op_vsce(DisasContext *s, DisasOps *o)
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1098,7 +1050,6 @@ static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN);
write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1116,7 +1067,6 @@ static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEUQ);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1134,7 +1084,6 @@ static DisasJumpType op_vstebr(DisasContext *s, DisasOps *o)
tmp = tcg_temp_new_i64();
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_LE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1189,9 +1138,6 @@ write:
tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_LEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_LEUQ);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -1209,7 +1155,6 @@ static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
tmp = tcg_temp_new_i64();
read_vec_element_i64(tmp, get_field(s, v1), enr, es);
tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1251,9 +1196,6 @@ static DisasJumpType op_vster(DisasContext *s, DisasOps *o)
tcg_gen_qemu_st_i64(t0, o->addr1, get_mem_index(s), MO_TEUQ);
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
tcg_gen_qemu_st_i64(t1, o->addr1, get_mem_index(s), MO_TEUQ);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
return DISAS_NEXT;
}
@@ -1284,7 +1226,6 @@ static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
}
gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1297,7 +1238,6 @@ static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
tcg_gen_addi_i64(o->in2, o->in2, 1);
tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
- tcg_temp_free_ptr(a0);
return DISAS_NEXT;
}
@@ -1335,7 +1275,6 @@ static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
write_vec_element_i64(tmp, v1, dst_idx, dst_es);
}
}
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -1377,10 +1316,6 @@ static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
/* Isolate and shift the carry into position */
tcg_gen_and_i64(d, d, msb_mask);
tcg_gen_shri_i64(d, d, msb_bit_nr);
-
- tcg_temp_free_i64(t1);
- tcg_temp_free_i64(t2);
- tcg_temp_free_i64(t3);
}
static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
@@ -1399,7 +1334,6 @@ static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
tcg_gen_add_i32(t, a, b);
tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
- tcg_temp_free_i32(t);
}
static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
@@ -1408,7 +1342,6 @@ static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
tcg_gen_add_i64(t, a, b);
tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
- tcg_temp_free_i64(t);
}
static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
@@ -1422,9 +1355,6 @@ static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(th);
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
@@ -1460,8 +1390,6 @@ static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
tcg_gen_extract_i64(tl, cl, 0, 1);
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
tcg_gen_add2_i64(dl, dh, dl, dh, tl, zero);
-
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
@@ -1490,9 +1418,6 @@ static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
@@ -1533,9 +1458,6 @@ static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
tcg_gen_addi_i64(t0, t0, 1);
tcg_gen_shri_i64(t0, t0, 1);
tcg_gen_extrl_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
@@ -1550,10 +1472,6 @@ static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
gen_addi2_i64(dl, dh, dl, dh, 1);
tcg_gen_extract2_i64(dl, dl, dh, 1);
-
- tcg_temp_free_i64(dh);
- tcg_temp_free_i64(ah);
- tcg_temp_free_i64(bh);
}
static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
@@ -1586,9 +1504,6 @@ static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
tcg_gen_addi_i64(t0, t0, 1);
tcg_gen_shri_i64(t0, t0, 1);
tcg_gen_extrl_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
}
static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
@@ -1599,8 +1514,6 @@ static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
gen_addi2_i64(dl, dh, dl, dh, 1);
tcg_gen_extract2_i64(dl, dl, dh, 1);
-
- tcg_temp_free_i64(dh);
}
static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
@@ -1635,9 +1548,6 @@ static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
}
gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);
-
- tcg_temp_free_i32(tmp);
- tcg_temp_free_i32(sum);
return DISAS_NEXT;
}
@@ -1682,9 +1592,6 @@ static DisasJumpType op_vc(DisasContext *s, DisasOps *o)
read_vec_element_i64(high, get_field(s, v1), 0, ES_64);
read_vec_element_i64(low, get_field(s, v1), 1, ES_64);
gen_op_update2_cc_i64(s, CC_OP_VC, low, high);
-
- tcg_temp_free_i64(low);
- tcg_temp_free_i64(high);
}
return DISAS_NEXT;
}
@@ -1853,8 +1760,6 @@ static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
tcg_gen_mul_i32(t0, a, b);
tcg_gen_add_i32(d, t0, c);
-
- tcg_temp_free_i32(t0);
}
static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
@@ -1869,10 +1774,6 @@ static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
tcg_gen_mul_i64(t0, t0, t1);
tcg_gen_add_i64(t0, t0, t2);
tcg_gen_extrh_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
@@ -1887,10 +1788,6 @@ static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
tcg_gen_mul_i64(t0, t0, t1);
tcg_gen_add_i64(t0, t0, t2);
tcg_gen_extrh_i64_i32(d, t0);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
}
static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
@@ -1974,7 +1871,6 @@ static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_muls2_i32(t, d, a, b);
- tcg_temp_free_i32(t);
}
static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
@@ -1982,7 +1878,6 @@ static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
TCGv_i32 t = tcg_temp_new_i32();
tcg_gen_mulu2_i32(t, d, a, b);
- tcg_temp_free_i32(t);
}
static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
@@ -2099,11 +1994,6 @@ static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o)
/* Store final result into v1. */
write_vec_element_i64(h1, get_field(s, v1), 0, ES_64);
write_vec_element_i64(l1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free_i64(l1);
- tcg_temp_free_i64(h1);
- tcg_temp_free_i64(l2);
- tcg_temp_free_i64(h2);
return DISAS_NEXT;
}
@@ -2169,8 +2059,6 @@ static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c)
tcg_gen_and_i32(t, t, b);
tcg_gen_andc_i32(d, d, b);
tcg_gen_or_i32(d, d, t);
-
- tcg_temp_free_i32(t);
}
static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
@@ -2181,8 +2069,6 @@ static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
tcg_gen_and_i64(t, t, b);
tcg_gen_andc_i64(d, d, b);
tcg_gen_or_i64(d, d, t);
-
- tcg_temp_free_i64(t);
}
static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
@@ -2291,7 +2177,6 @@ static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
default:
g_assert_not_reached();
}
- tcg_temp_free_i32(shift);
}
return DISAS_NEXT;
}
@@ -2311,7 +2196,6 @@ static DisasJumpType gen_vsh_by_byte(DisasContext *s, DisasOps *o,
read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
tcg_gen_andi_i64(shift, shift, byte ? 0x78 : 7);
gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2), shift, 0, gen);
- tcg_temp_free_i64(shift);
}
return DISAS_NEXT;
}
@@ -2367,10 +2251,6 @@ static DisasJumpType op_vsld(DisasContext *s, DisasOps *o)
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
return DISAS_NEXT;
}
@@ -2397,10 +2277,6 @@ static DisasJumpType op_vsrd(DisasContext *s, DisasOps *o)
write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_temp_free(t2);
return DISAS_NEXT;
}
@@ -2445,9 +2321,6 @@ static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
/* "invert" the result: -1 -> 0; 0 -> 1 */
tcg_gen_addi_i64(dl, th, 1);
tcg_gen_mov_i64(dh, zero);
-
- tcg_temp_free_i64(th);
- tcg_temp_free_i64(tl);
}
static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
@@ -2482,8 +2355,6 @@ static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
tcg_gen_not_i64(tl, bl);
tcg_gen_not_i64(th, bh);
gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch);
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o)
@@ -2508,9 +2379,6 @@ static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
tcg_gen_not_i64(tl, bl);
tcg_gen_not_i64(th, bh);
gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch);
-
- tcg_temp_free_i64(tl);
- tcg_temp_free_i64(th);
}
static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o)
@@ -2550,8 +2418,6 @@ static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o)
}
write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64);
}
- tcg_temp_free_i64(sum);
- tcg_temp_free_i64(tmp);
return DISAS_NEXT;
}
@@ -2580,10 +2446,6 @@ static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
}
write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64);
write_vec_element_i64(suml, get_field(s, v1), 1, ES_64);
-
- tcg_temp_free_i64(sumh);
- tcg_temp_free_i64(suml);
- tcg_temp_free_i64(tmpl);
return DISAS_NEXT;
}
@@ -2611,8 +2473,6 @@ static DisasJumpType op_vsum(DisasContext *s, DisasOps *o)
}
write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32);
}
- tcg_temp_free_i32(sum);
- tcg_temp_free_i32(tmp);
return DISAS_NEXT;
}
@@ -3399,9 +3259,6 @@ static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o)
read_vec_element_i64(tmp, v2, 1, ES_64);
write_vec_element_i64(tmp, v1, 1, ES_64);
}
-
- tcg_temp_free_i64(tmp);
-
return DISAS_NEXT;
}