aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--docs/devel/tcg-ops.rst2
-rw-r--r--include/tcg/tcg-cond.h64
-rw-r--r--target/alpha/translate.c94
-rw-r--r--target/m68k/translate.c74
-rw-r--r--target/s390x/tcg/translate.c84
-rw-r--r--target/sparc/translate.c4
-rw-r--r--tcg/aarch64/tcg-target-con-set.h5
-rw-r--r--tcg/aarch64/tcg-target-con-str.h1
-rw-r--r--tcg/aarch64/tcg-target.c.inc160
-rw-r--r--tcg/aarch64/tcg-target.h2
-rw-r--r--tcg/arm/tcg-target.c.inc62
-rw-r--r--tcg/arm/tcg-target.h2
-rw-r--r--tcg/i386/tcg-target-con-set.h6
-rw-r--r--tcg/i386/tcg-target-con-str.h1
-rw-r--r--tcg/i386/tcg-target.c.inc201
-rw-r--r--tcg/i386/tcg-target.h2
-rw-r--r--tcg/loongarch64/tcg-target.c.inc3
-rw-r--r--tcg/loongarch64/tcg-target.h2
-rw-r--r--tcg/mips/tcg-target.c.inc3
-rw-r--r--tcg/mips/tcg-target.h2
-rw-r--r--tcg/optimize.c474
-rw-r--r--tcg/ppc/tcg-target-con-set.h5
-rw-r--r--tcg/ppc/tcg-target-con-str.h1
-rw-r--r--tcg/ppc/tcg-target.c.inc294
-rw-r--r--tcg/ppc/tcg-target.h2
-rw-r--r--tcg/riscv/tcg-target.c.inc3
-rw-r--r--tcg/riscv/tcg-target.h2
-rw-r--r--tcg/s390x/tcg-target-con-set.h8
-rw-r--r--tcg/s390x/tcg-target-con-str.h3
-rw-r--r--tcg/s390x/tcg-target.c.inc244
-rw-r--r--tcg/s390x/tcg-target.h2
-rw-r--r--tcg/sparc64/tcg-target.c.inc65
-rw-r--r--tcg/sparc64/tcg-target.h2
-rw-r--r--tcg/tcg-internal.h2
-rw-r--r--tcg/tcg.c40
-rw-r--r--tcg/tci.c14
-rw-r--r--tcg/tci/tcg-target.c.inc3
-rw-r--r--tcg/tci/tcg-target.h2
38 files changed, 1362 insertions(+), 578 deletions(-)
diff --git a/docs/devel/tcg-ops.rst b/docs/devel/tcg-ops.rst
index 8ae59ea..d46b625 100644
--- a/docs/devel/tcg-ops.rst
+++ b/docs/devel/tcg-ops.rst
@@ -253,6 +253,8 @@ Jumps/Labels
| ``TCG_COND_GEU /* unsigned */``
| ``TCG_COND_LEU /* unsigned */``
| ``TCG_COND_GTU /* unsigned */``
+ | ``TCG_COND_TSTEQ /* (t1 & t2) == 0 */``
+ | ``TCG_COND_TSTNE /* (t1 & t2) != 0 */``
Arithmetic
----------
diff --git a/include/tcg/tcg-cond.h b/include/tcg/tcg-cond.h
index 2a38a38..5cadbd6 100644
--- a/include/tcg/tcg-cond.h
+++ b/include/tcg/tcg-cond.h
@@ -29,26 +29,34 @@
* Conditions. Note that these are laid out for easy manipulation by
* the functions below:
* bit 0 is used for inverting;
- * bit 1 is signed,
- * bit 2 is unsigned,
- * bit 3 is used with bit 0 for swapping signed/unsigned.
+ * bit 1 is used for conditions that need swapping (signed/unsigned).
+ * bit 2 is used with bit 1 for swapping.
+ * bit 3 is used for unsigned conditions.
*/
typedef enum {
/* non-signed */
TCG_COND_NEVER = 0 | 0 | 0 | 0,
TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
+
+ /* equality */
TCG_COND_EQ = 8 | 0 | 0 | 0,
TCG_COND_NE = 8 | 0 | 0 | 1,
+
+ /* "test", i.e. bitwise-AND the operands, then compare vs 0 */
+ TCG_COND_TSTEQ = 8 | 4 | 0 | 0,
+ TCG_COND_TSTNE = 8 | 4 | 0 | 1,
+
/* signed */
TCG_COND_LT = 0 | 0 | 2 | 0,
TCG_COND_GE = 0 | 0 | 2 | 1,
- TCG_COND_LE = 8 | 0 | 2 | 0,
- TCG_COND_GT = 8 | 0 | 2 | 1,
+ TCG_COND_GT = 0 | 4 | 2 | 0,
+ TCG_COND_LE = 0 | 4 | 2 | 1,
+
/* unsigned */
- TCG_COND_LTU = 0 | 4 | 0 | 0,
- TCG_COND_GEU = 0 | 4 | 0 | 1,
- TCG_COND_LEU = 8 | 4 | 0 | 0,
- TCG_COND_GTU = 8 | 4 | 0 | 1,
+ TCG_COND_LTU = 8 | 0 | 2 | 0,
+ TCG_COND_GEU = 8 | 0 | 2 | 1,
+ TCG_COND_GTU = 8 | 4 | 2 | 0,
+ TCG_COND_LEU = 8 | 4 | 2 | 1,
} TCGCond;
/* Invert the sense of the comparison. */
@@ -60,25 +68,49 @@ static inline TCGCond tcg_invert_cond(TCGCond c)
/* Swap the operands in a comparison. */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
- return c & 6 ? (TCGCond)(c ^ 9) : c;
+ return (TCGCond)(c ^ ((c & 2) << 1));
+}
+
+/* Must a comparison be considered signed? */
+static inline bool is_signed_cond(TCGCond c)
+{
+ return (c & (8 | 2)) == 2;
+}
+
+/* Must a comparison be considered unsigned? */
+static inline bool is_unsigned_cond(TCGCond c)
+{
+ return (c & (8 | 2)) == (8 | 2);
+}
+
+/* Must a comparison be considered a test? */
+static inline bool is_tst_cond(TCGCond c)
+{
+ return (c | 1) == TCG_COND_TSTNE;
}
/* Create an "unsigned" version of a "signed" comparison. */
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
- return c & 2 ? (TCGCond)(c ^ 6) : c;
+ return is_signed_cond(c) ? (TCGCond)(c + 8) : c;
}
/* Create a "signed" version of an "unsigned" comparison. */
static inline TCGCond tcg_signed_cond(TCGCond c)
{
- return c & 4 ? (TCGCond)(c ^ 6) : c;
+ return is_unsigned_cond(c) ? (TCGCond)(c - 8) : c;
}
-/* Must a comparison be considered unsigned? */
-static inline bool is_unsigned_cond(TCGCond c)
+/* Create the eq/ne version of a tsteq/tstne comparison. */
+static inline TCGCond tcg_tst_eqne_cond(TCGCond c)
+{
+ return is_tst_cond(c) ? (TCGCond)(c - 4) : c;
+}
+
+/* Create the lt/ge version of a tstne/tsteq comparison of the sign. */
+static inline TCGCond tcg_tst_ltge_cond(TCGCond c)
{
- return (c & 4) != 0;
+ return is_tst_cond(c) ? (TCGCond)(c ^ 0xf) : c;
}
/*
@@ -92,7 +124,7 @@ static inline TCGCond tcg_high_cond(TCGCond c)
case TCG_COND_LE:
case TCG_COND_GEU:
case TCG_COND_LEU:
- return (TCGCond)(c ^ 8);
+ return (TCGCond)(c ^ (4 | 1));
default:
return c;
}
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index 4b464f8..882cf6c 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -453,13 +453,13 @@ static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
}
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
- TCGv cmp, int32_t disp)
+ TCGv cmp, uint64_t imm, int32_t disp)
{
uint64_t dest = ctx->base.pc_next + (disp << 2);
TCGLabel *lab_true = gen_new_label();
if (use_goto_tb(ctx, dest)) {
- tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
+ tcg_gen_brcondi_i64(cond, cmp, imm, lab_true);
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
@@ -472,81 +472,71 @@ static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
return DISAS_NORETURN;
} else {
- TCGv_i64 z = load_zero(ctx);
+ TCGv_i64 i = tcg_constant_i64(imm);
TCGv_i64 d = tcg_constant_i64(dest);
TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);
- tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
+ tcg_gen_movcond_i64(cond, cpu_pc, cmp, i, d, p);
return DISAS_PC_UPDATED;
}
}
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
- int32_t disp, int mask)
+ int32_t disp)
{
- if (mask) {
- TCGv tmp = tcg_temp_new();
- DisasJumpType ret;
-
- tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
- ret = gen_bcond_internal(ctx, cond, tmp, disp);
- return ret;
- }
- return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
+ return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra),
+ is_tst_cond(cond), disp);
}
/* Fold -0.0 for comparison with COND. */
-static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
+static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
{
- uint64_t mzero = 1ull << 63;
+ TCGv_i64 tmp;
- switch (cond) {
+ *pimm = 0;
+ switch (*pcond) {
case TCG_COND_LE:
case TCG_COND_GT:
/* For <= or >, the -0.0 value directly compares the way we want. */
- tcg_gen_mov_i64(dest, src);
- break;
+ return src;
case TCG_COND_EQ:
case TCG_COND_NE:
- /* For == or !=, we can simply mask off the sign bit and compare. */
- tcg_gen_andi_i64(dest, src, mzero - 1);
- break;
+ /* For == or !=, we can compare without the sign bit. */
+ *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
+ *pimm = INT64_MAX;
+ return src;
case TCG_COND_GE:
case TCG_COND_LT:
/* For >= or <, map -0.0 to +0.0. */
- tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero),
- src, tcg_constant_i64(0));
- break;
+ tmp = tcg_temp_new_i64();
+ tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
+ src, tcg_constant_i64(INT64_MIN),
+ tcg_constant_i64(0), src);
+ return tmp;
default:
- abort();
+ g_assert_not_reached();
}
}
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
int32_t disp)
{
- TCGv cmp_tmp = tcg_temp_new();
- DisasJumpType ret;
-
- gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
- ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
- return ret;
+ uint64_t imm;
+ TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
+ return gen_bcond_internal(ctx, cond, tmp, imm, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
- TCGv_i64 va, vb, z;
-
- z = load_zero(ctx);
- vb = load_fpr(ctx, rb);
- va = tcg_temp_new();
- gen_fold_mzero(cond, va, load_fpr(ctx, ra));
-
- tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
+ uint64_t imm;
+ TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
+ tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
+ tmp, tcg_constant_i64(imm),
+ load_fpr(ctx, rb), load_fpr(ctx, rc));
}
#define QUAL_RM_N 0x080 /* Round mode nearest even */
@@ -1683,16 +1673,12 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x14:
/* CMOVLBS */
- tmp = tcg_temp_new();
- tcg_gen_andi_i64(tmp, va, 1);
- tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
+ tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1),
vb, load_gpr(ctx, rc));
break;
case 0x16:
/* CMOVLBC */
- tmp = tcg_temp_new();
- tcg_gen_andi_i64(tmp, va, 1);
- tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
+ tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1),
vb, load_gpr(ctx, rc));
break;
case 0x20:
@@ -2827,35 +2813,35 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
break;
case 0x38:
/* BLBC */
- ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
+ ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21);
break;
case 0x39:
/* BEQ */
- ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
+ ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21);
break;
case 0x3A:
/* BLT */
- ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
+ ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21);
break;
case 0x3B:
/* BLE */
- ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
+ ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21);
break;
case 0x3C:
/* BLBS */
- ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
+ ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21);
break;
case 0x3D:
/* BNE */
- ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
+ ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21);
break;
case 0x3E:
/* BGE */
- ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
+ ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21);
break;
case 0x3F:
/* BGT */
- ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
+ ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21);
break;
invalid_opc:
ret = gen_invalid(ctx);
diff --git a/target/m68k/translate.c b/target/m68k/translate.c
index f886190..d7d5ff4 100644
--- a/target/m68k/translate.c
+++ b/target/m68k/translate.c
@@ -5129,46 +5129,44 @@ undef:
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
TCGv fpsr;
+ int imm = 0;
- c->v2 = tcg_constant_i32(0);
/* TODO: Raise BSUN exception. */
fpsr = tcg_temp_new();
gen_load_fcr(s, fpsr, M68K_FPSR);
+ c->v1 = fpsr;
+
switch (cond) {
case 0: /* False */
case 16: /* Signaling False */
- c->v1 = c->v2;
c->tcond = TCG_COND_NEVER;
break;
case 1: /* EQual Z */
case 17: /* Signaling EQual Z */
- c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
- c->tcond = TCG_COND_NE;
+ imm = FPSR_CC_Z;
+ c->tcond = TCG_COND_TSTNE;
break;
case 2: /* Ordered Greater Than !(A || Z || N) */
case 18: /* Greater Than !(A || Z || N) */
- c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr,
- FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
- c->tcond = TCG_COND_EQ;
+ imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
+ c->tcond = TCG_COND_TSTEQ;
break;
case 3: /* Ordered Greater than or Equal Z || !(A || N) */
case 19: /* Greater than or Equal Z || !(A || N) */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
- tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_Z | FPSR_CC_N);
tcg_gen_or_i32(c->v1, c->v1, fpsr);
tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
- c->tcond = TCG_COND_NE;
+ imm = FPSR_CC_Z | FPSR_CC_N;
+ c->tcond = TCG_COND_TSTNE;
break;
case 4: /* Ordered Less Than !(!N || A || Z); */
case 20: /* Less Than !(!N || A || Z); */
c->v1 = tcg_temp_new();
tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
- tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z);
- c->tcond = TCG_COND_EQ;
+ imm = FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z;
+ c->tcond = TCG_COND_TSTEQ;
break;
case 5: /* Ordered Less than or Equal Z || (N && !A) */
case 21: /* Less than or Equal Z || (N && !A) */
@@ -5176,49 +5174,45 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
tcg_gen_andc_i32(c->v1, fpsr, c->v1);
- tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_Z | FPSR_CC_N);
- c->tcond = TCG_COND_NE;
+ imm = FPSR_CC_Z | FPSR_CC_N;
+ c->tcond = TCG_COND_TSTNE;
break;
case 6: /* Ordered Greater or Less than !(A || Z) */
case 22: /* Greater or Less than !(A || Z) */
- c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
- c->tcond = TCG_COND_EQ;
+ imm = FPSR_CC_A | FPSR_CC_Z;
+ c->tcond = TCG_COND_TSTEQ;
break;
case 7: /* Ordered !A */
case 23: /* Greater, Less or Equal !A */
- c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
- c->tcond = TCG_COND_EQ;
+ imm = FPSR_CC_A;
+ c->tcond = TCG_COND_TSTEQ;
break;
case 8: /* Unordered A */
case 24: /* Not Greater, Less or Equal A */
- c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
- c->tcond = TCG_COND_NE;
+ imm = FPSR_CC_A;
+ c->tcond = TCG_COND_TSTNE;
break;
case 9: /* Unordered or Equal A || Z */
case 25: /* Not Greater or Less then A || Z */
- c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z);
- c->tcond = TCG_COND_NE;
+ imm = FPSR_CC_A | FPSR_CC_Z;
+ c->tcond = TCG_COND_TSTNE;
break;
case 10: /* Unordered or Greater Than A || !(N || Z)) */
case 26: /* Not Less or Equal A || !(N || Z)) */
c->v1 = tcg_temp_new();
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
- tcg_gen_andi_i32(fpsr, fpsr, FPSR_CC_A | FPSR_CC_N);
tcg_gen_or_i32(c->v1, c->v1, fpsr);
tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
- c->tcond = TCG_COND_NE;
+ imm = FPSR_CC_A | FPSR_CC_N;
+ c->tcond = TCG_COND_TSTNE;
break;
case 11: /* Unordered or Greater or Equal A || Z || !N */
case 27: /* Not Less Than A || Z || !N */
c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
- tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
- c->tcond = TCG_COND_NE;
+ tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
+ imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
+ c->tcond = TCG_COND_TSTNE;
break;
case 12: /* Unordered or Less Than A || (N && !Z) */
case 28: /* Not Greater than or Equal A || (N && !Z) */
@@ -5226,27 +5220,25 @@ static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
tcg_gen_andc_i32(c->v1, fpsr, c->v1);
- tcg_gen_andi_i32(c->v1, c->v1, FPSR_CC_A | FPSR_CC_N);
- c->tcond = TCG_COND_NE;
+ imm = FPSR_CC_A | FPSR_CC_N;
+ c->tcond = TCG_COND_TSTNE;
break;
case 13: /* Unordered or Less or Equal A || Z || N */
case 29: /* Not Greater Than A || Z || N */
- c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N);
- c->tcond = TCG_COND_NE;
+ imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
+ c->tcond = TCG_COND_TSTNE;
break;
case 14: /* Not Equal !Z */
case 30: /* Signaling Not Equal !Z */
- c->v1 = tcg_temp_new();
- tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
- c->tcond = TCG_COND_EQ;
+ imm = FPSR_CC_Z;
+ c->tcond = TCG_COND_TSTEQ;
break;
case 15: /* True */
case 31: /* Signaling True */
- c->v1 = c->v2;
c->tcond = TCG_COND_ALWAYS;
break;
}
+ c->v2 = tcg_constant_i32(imm);
}
static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c
index 6e4e1f2..0d0c672 100644
--- a/target/s390x/tcg/translate.c
+++ b/target/s390x/tcg/translate.c
@@ -754,10 +754,10 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_TM_64:
switch (mask) {
case 8:
- cond = TCG_COND_EQ;
+ cond = TCG_COND_TSTEQ;
break;
case 4 | 2 | 1:
- cond = TCG_COND_NE;
+ cond = TCG_COND_TSTNE;
break;
default:
goto do_dynamic;
@@ -768,11 +768,11 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_ICM:
switch (mask) {
case 8:
- cond = TCG_COND_EQ;
+ cond = TCG_COND_TSTEQ;
break;
case 4 | 2 | 1:
case 4 | 2:
- cond = TCG_COND_NE;
+ cond = TCG_COND_TSTNE;
break;
default:
goto do_dynamic;
@@ -854,18 +854,14 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
c->u.s64.a = cc_dst;
c->u.s64.b = tcg_constant_i64(0);
break;
+
case CC_OP_LTGT_64:
case CC_OP_LTUGTU_64:
- c->u.s64.a = cc_src;
- c->u.s64.b = cc_dst;
- break;
-
case CC_OP_TM_32:
case CC_OP_TM_64:
case CC_OP_ICM:
- c->u.s64.a = tcg_temp_new_i64();
- c->u.s64.b = tcg_constant_i64(0);
- tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
+ c->u.s64.a = cc_src;
+ c->u.s64.b = cc_dst;
break;
case CC_OP_ADDU:
@@ -889,67 +885,45 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
case CC_OP_STATIC:
c->is_64 = false;
c->u.s32.a = cc_op;
- switch (mask) {
- case 0x8 | 0x4 | 0x2: /* cc != 3 */
- cond = TCG_COND_NE;
+
+ /* Fold half of the cases using bit 3 to invert. */
+ switch (mask & 8 ? mask ^ 0xf : mask) {
+ case 0x1: /* cc == 3 */
+ cond = TCG_COND_EQ;
c->u.s32.b = tcg_constant_i32(3);
break;
- case 0x8 | 0x4 | 0x1: /* cc != 2 */
- cond = TCG_COND_NE;
- c->u.s32.b = tcg_constant_i32(2);
- break;
- case 0x8 | 0x2 | 0x1: /* cc != 1 */
- cond = TCG_COND_NE;
- c->u.s32.b = tcg_constant_i32(1);
- break;
- case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
+ case 0x2: /* cc == 2 */
cond = TCG_COND_EQ;
- c->u.s32.a = tcg_temp_new_i32();
- c->u.s32.b = tcg_constant_i32(0);
- tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
- break;
- case 0x8 | 0x4: /* cc < 2 */
- cond = TCG_COND_LTU;
c->u.s32.b = tcg_constant_i32(2);
break;
- case 0x8: /* cc == 0 */
- cond = TCG_COND_EQ;
- c->u.s32.b = tcg_constant_i32(0);
- break;
- case 0x4 | 0x2 | 0x1: /* cc != 0 */
- cond = TCG_COND_NE;
- c->u.s32.b = tcg_constant_i32(0);
- break;
- case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
- cond = TCG_COND_NE;
- c->u.s32.a = tcg_temp_new_i32();
- c->u.s32.b = tcg_constant_i32(0);
- tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
- break;
case 0x4: /* cc == 1 */
cond = TCG_COND_EQ;
c->u.s32.b = tcg_constant_i32(1);
break;
- case 0x2 | 0x1: /* cc > 1 */
+ case 0x2 | 0x1: /* cc == 2 || cc == 3 => cc > 1 */
cond = TCG_COND_GTU;
c->u.s32.b = tcg_constant_i32(1);
break;
- case 0x2: /* cc == 2 */
- cond = TCG_COND_EQ;
- c->u.s32.b = tcg_constant_i32(2);
+ case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
+ cond = TCG_COND_TSTNE;
+ c->u.s32.b = tcg_constant_i32(1);
break;
- case 0x1: /* cc == 3 */
- cond = TCG_COND_EQ;
- c->u.s32.b = tcg_constant_i32(3);
+ case 0x4 | 0x2: /* cc == 1 || cc == 2 => (cc - 1) <= 1 */
+ cond = TCG_COND_LEU;
+ c->u.s32.a = tcg_temp_new_i32();
+ c->u.s32.b = tcg_constant_i32(1);
+ tcg_gen_addi_i32(c->u.s32.a, cc_op, -1);
break;
- default:
- /* CC is masked by something else: (8 >> cc) & mask. */
+ case 0x4 | 0x2 | 0x1: /* cc != 0 */
cond = TCG_COND_NE;
- c->u.s32.a = tcg_temp_new_i32();
c->u.s32.b = tcg_constant_i32(0);
- tcg_gen_shr_i32(c->u.s32.a, tcg_constant_i32(8), cc_op);
- tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
break;
+ default:
+ /* case 0: never, handled above. */
+ g_assert_not_reached();
+ }
+ if (mask & 8) {
+ cond = tcg_invert_cond(cond);
}
break;
diff --git a/target/sparc/translate.c b/target/sparc/translate.c
index 7df6f83..d9304a5 100644
--- a/target/sparc/translate.c
+++ b/target/sparc/translate.c
@@ -488,6 +488,7 @@ static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
TCGv zero = tcg_constant_tl(0);
+ TCGv one = tcg_constant_tl(1);
TCGv t_src1 = tcg_temp_new();
TCGv t_src2 = tcg_temp_new();
TCGv t0 = tcg_temp_new();
@@ -499,8 +500,7 @@ static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
* if (!(env->y & 1))
* src2 = 0;
*/
- tcg_gen_andi_tl(t0, cpu_y, 0x1);
- tcg_gen_movcond_tl(TCG_COND_EQ, t_src2, t0, zero, zero, t_src2);
+ tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);
/*
* b2 = src1 & 1;
diff --git a/tcg/aarch64/tcg-target-con-set.h b/tcg/aarch64/tcg-target-con-set.h
index 3fdee26..44fcc12 100644
--- a/tcg/aarch64/tcg-target-con-set.h
+++ b/tcg/aarch64/tcg-target-con-set.h
@@ -10,7 +10,7 @@
* tcg-target-con-str.h; the constraint combination is inclusive or.
*/
C_O0_I1(r)
-C_O0_I2(r, rA)
+C_O0_I2(r, rC)
C_O0_I2(rZ, r)
C_O0_I2(w, r)
C_O0_I3(rZ, rZ, r)
@@ -22,6 +22,7 @@ C_O1_I2(r, 0, rZ)
C_O1_I2(r, r, r)
C_O1_I2(r, r, rA)
C_O1_I2(r, r, rAL)
+C_O1_I2(r, r, rC)
C_O1_I2(r, r, ri)
C_O1_I2(r, r, rL)
C_O1_I2(r, rZ, rZ)
@@ -31,6 +32,6 @@ C_O1_I2(w, w, wN)
C_O1_I2(w, w, wO)
C_O1_I2(w, w, wZ)
C_O1_I3(w, w, w, w)
-C_O1_I4(r, r, rA, rZ, rZ)
+C_O1_I4(r, r, rC, rZ, rZ)
C_O2_I1(r, r, r)
C_O2_I4(r, r, rZ, rZ, rA, rMZ)
diff --git a/tcg/aarch64/tcg-target-con-str.h b/tcg/aarch64/tcg-target-con-str.h
index fb1a845..48e1722 100644
--- a/tcg/aarch64/tcg-target-con-str.h
+++ b/tcg/aarch64/tcg-target-con-str.h
@@ -16,6 +16,7 @@ REGS('w', ALL_VECTOR_REGS)
* CONST(letter, TCG_CT_CONST_* bit set)
*/
CONST('A', TCG_CT_CONST_AIMM)
+CONST('C', TCG_CT_CONST_CMP)
CONST('L', TCG_CT_CONST_LIMM)
CONST('M', TCG_CT_CONST_MONE)
CONST('O', TCG_CT_CONST_ORRI)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index a3efa1e..dec8ecc 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -105,6 +105,18 @@ static bool reloc_pc19(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
return false;
}
+static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
+{
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
+ ptrdiff_t offset = target - src_rx;
+
+ if (offset == sextract64(offset, 0, 14)) {
+ *src_rw = deposit32(*src_rw, 5, 14, offset);
+ return true;
+ }
+ return false;
+}
+
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
intptr_t value, intptr_t addend)
{
@@ -115,6 +127,8 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
return reloc_pc26(code_ptr, (const tcg_insn_unit *)value);
case R_AARCH64_CONDBR19:
return reloc_pc19(code_ptr, (const tcg_insn_unit *)value);
+ case R_AARCH64_TSTBR14:
+ return reloc_pc14(code_ptr, (const tcg_insn_unit *)value);
default:
g_assert_not_reached();
}
@@ -126,6 +140,7 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
#define TCG_CT_CONST_MONE 0x800
#define TCG_CT_CONST_ORRI 0x1000
#define TCG_CT_CONST_ANDI 0x2000
+#define TCG_CT_CONST_CMP 0x4000
#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
@@ -270,7 +285,8 @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
}
}
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -278,6 +294,15 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
if (type == TCG_TYPE_I32) {
val = (int32_t)val;
}
+
+ if (ct & TCG_CT_CONST_CMP) {
+ if (is_tst_cond(cond)) {
+ ct |= TCG_CT_CONST_LIMM;
+ } else {
+ ct |= TCG_CT_CONST_AIMM;
+ }
+ }
+
if ((ct & TCG_CT_CONST_AIMM) && (is_aimm(val) || is_aimm(-val))) {
return 1;
}
@@ -344,6 +369,9 @@ static const enum aarch64_cond_code tcg_cond_to_aarch64[] = {
[TCG_COND_GTU] = COND_HI,
[TCG_COND_GEU] = COND_HS,
[TCG_COND_LEU] = COND_LS,
+ /* bit test */
+ [TCG_COND_TSTEQ] = COND_EQ,
+ [TCG_COND_TSTNE] = COND_NE,
};
typedef enum {
@@ -366,6 +394,10 @@ typedef enum {
/* Conditional branch (immediate). */
I3202_B_C = 0x54000000,
+ /* Test and branch (immediate). */
+ I3205_TBZ = 0x36000000,
+ I3205_TBNZ = 0x37000000,
+
/* Unconditional branch (immediate). */
I3206_B = 0x14000000,
I3206_BL = 0x94000000,
@@ -646,6 +678,14 @@ static void tcg_out_insn_3202(TCGContext *s, AArch64Insn insn,
tcg_out32(s, insn | tcg_cond_to_aarch64[c] | (imm19 & 0x7ffff) << 5);
}
+static void tcg_out_insn_3205(TCGContext *s, AArch64Insn insn,
+ TCGReg rt, int imm6, int imm14)
+{
+ insn |= (imm6 & 0x20) << (31 - 5);
+ insn |= (imm6 & 0x1f) << 19;
+ tcg_out32(s, insn | (imm14 & 0x3fff) << 5 | rt);
+}
+
static void tcg_out_insn_3206(TCGContext *s, AArch64Insn insn, int imm26)
{
tcg_out32(s, insn | (imm26 & 0x03ffffff));
@@ -1341,19 +1381,26 @@ static inline void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd,
tcg_out_bfm(s, ext, rd, rn, a, b);
}
-static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGReg a,
+static void tcg_out_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg a,
tcg_target_long b, bool const_b)
{
- if (const_b) {
- /* Using CMP or CMN aliases. */
- if (b >= 0) {
+ if (is_tst_cond(cond)) {
+ if (!const_b) {
+ tcg_out_insn(s, 3510, ANDS, ext, TCG_REG_XZR, a, b);
+ } else {
+ tcg_debug_assert(is_limm(b));
+ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, a, b);
+ }
+ } else {
+ if (!const_b) {
+ tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
+ } else if (b >= 0) {
+ tcg_debug_assert(is_aimm(b));
tcg_out_insn(s, 3401, SUBSI, ext, TCG_REG_XZR, a, b);
} else {
+ tcg_debug_assert(is_aimm(-b));
tcg_out_insn(s, 3401, ADDSI, ext, TCG_REG_XZR, a, -b);
}
- } else {
- /* Using CMP alias SUBS wzr, Wn, Wm */
- tcg_out_insn(s, 3502, SUBS, ext, TCG_REG_XZR, a, b);
}
}
@@ -1394,30 +1441,75 @@ static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond c, TCGArg a,
TCGArg b, bool b_const, TCGLabel *l)
{
- intptr_t offset;
- bool need_cmp;
+ int tbit = -1;
+ bool need_cmp = true;
- if (b_const && b == 0 && (c == TCG_COND_EQ || c == TCG_COND_NE)) {
- need_cmp = false;
- } else {
- need_cmp = true;
- tcg_out_cmp(s, ext, a, b, b_const);
+ switch (c) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ /* cmp xN,0; b.ne L -> cbnz xN,L */
+ if (b_const && b == 0) {
+ need_cmp = false;
+ }
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ /* cmp xN,0; b.mi L -> tbnz xN,63,L */
+ if (b_const && b == 0) {
+ c = (c == TCG_COND_LT ? TCG_COND_TSTNE : TCG_COND_TSTEQ);
+ tbit = ext ? 63 : 31;
+ need_cmp = false;
+ }
+ break;
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ /* tst xN,0xffffffff; b.ne L -> cbnz wN,L */
+ if (b_const && b == UINT32_MAX) {
+ ext = TCG_TYPE_I32;
+ need_cmp = false;
+ break;
+ }
+ /* tst xN,1<<B; b.ne L -> tbnz xN,B,L */
+ if (b_const && is_power_of_2(b)) {
+ tbit = ctz64(b);
+ need_cmp = false;
+ }
+ break;
+ default:
+ break;
}
- if (!l->has_value) {
+ if (need_cmp) {
+ tcg_out_cmp(s, ext, c, a, b, b_const);
tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
- offset = tcg_in32(s) >> 5;
- } else {
- offset = tcg_pcrel_diff(s, l->u.value_ptr) >> 2;
- tcg_debug_assert(offset == sextract64(offset, 0, 19));
+ tcg_out_insn(s, 3202, B_C, c, 0);
+ return;
}
- if (need_cmp) {
- tcg_out_insn(s, 3202, B_C, c, offset);
- } else if (c == TCG_COND_EQ) {
- tcg_out_insn(s, 3201, CBZ, ext, a, offset);
+ if (tbit >= 0) {
+ tcg_out_reloc(s, s->code_ptr, R_AARCH64_TSTBR14, l, 0);
+ switch (c) {
+ case TCG_COND_TSTEQ:
+ tcg_out_insn(s, 3205, TBZ, a, tbit, 0);
+ break;
+ case TCG_COND_TSTNE:
+ tcg_out_insn(s, 3205, TBNZ, a, tbit, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
} else {
- tcg_out_insn(s, 3201, CBNZ, ext, a, offset);
+ tcg_out_reloc(s, s->code_ptr, R_AARCH64_CONDBR19, l, 0);
+ switch (c) {
+ case TCG_COND_EQ:
+ tcg_out_insn(s, 3201, CBZ, ext, a, 0);
+ break;
+ case TCG_COND_NE:
+ tcg_out_insn(s, 3201, CBNZ, ext, a, 0);
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
}
@@ -1574,7 +1666,7 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d,
} else {
AArch64Insn sel = I3506_CSEL;
- tcg_out_cmp(s, ext, a0, 0, 1);
+ tcg_out_cmp(s, ext, TCG_COND_NE, a0, 0, 1);
tcg_out_insn(s, 3507, CLZ, ext, TCG_REG_TMP0, a1);
if (const_b) {
@@ -1719,7 +1811,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
addr_adj, compare_mask);
/* Perform the address comparison. */
- tcg_out_cmp(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2, 0);
+ tcg_out_cmp(s, addr_type, TCG_COND_NE, TCG_REG_TMP0, TCG_REG_TMP2, 0);
/* If not equal, we jump to the slow path. */
ldst->label_ptr[0] = s->code_ptr;
@@ -2275,7 +2367,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
a2 = (int32_t)a2;
/* FALLTHRU */
case INDEX_op_setcond_i64:
- tcg_out_cmp(s, ext, a1, a2, c2);
+ tcg_out_cmp(s, ext, args[3], a1, a2, c2);
/* Use CSET alias of CSINC Wd, WZR, WZR, invert(cond). */
tcg_out_insn(s, 3506, CSINC, TCG_TYPE_I32, a0, TCG_REG_XZR,
TCG_REG_XZR, tcg_invert_cond(args[3]));
@@ -2285,7 +2377,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
a2 = (int32_t)a2;
/* FALLTHRU */
case INDEX_op_negsetcond_i64:
- tcg_out_cmp(s, ext, a1, a2, c2);
+ tcg_out_cmp(s, ext, args[3], a1, a2, c2);
/* Use CSETM alias of CSINV Wd, WZR, WZR, invert(cond). */
tcg_out_insn(s, 3506, CSINV, ext, a0, TCG_REG_XZR,
TCG_REG_XZR, tcg_invert_cond(args[3]));
@@ -2295,7 +2387,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
a2 = (int32_t)a2;
/* FALLTHRU */
case INDEX_op_movcond_i64:
- tcg_out_cmp(s, ext, a1, a2, c2);
+ tcg_out_cmp(s, ext, args[5], a1, a2, c2);
tcg_out_insn(s, 3506, CSEL, ext, a0, REG0(3), REG0(4), args[5]);
break;
@@ -2895,11 +2987,13 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_add_i64:
case INDEX_op_sub_i32:
case INDEX_op_sub_i64:
+ return C_O1_I2(r, r, rA);
+
case INDEX_op_setcond_i32:
case INDEX_op_setcond_i64:
case INDEX_op_negsetcond_i32:
case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, r, rA);
+ return C_O1_I2(r, r, rC);
case INDEX_op_mul_i32:
case INDEX_op_mul_i64:
@@ -2949,11 +3043,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
- return C_O0_I2(r, rA);
+ return C_O0_I2(r, rC);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rA, rZ, rZ);
+ return C_O1_I4(r, r, rC, rZ, rZ);
case INDEX_op_qemu_ld_a32_i32:
case INDEX_op_qemu_ld_a64_i32:
diff --git a/tcg/aarch64/tcg-target.h b/tcg/aarch64/tcg-target.h
index 33f15a5..ef5ebe9 100644
--- a/tcg/aarch64/tcg-target.h
+++ b/tcg/aarch64/tcg-target.h
@@ -138,6 +138,8 @@ typedef enum {
#define TCG_TARGET_HAS_qemu_ldst_i128 1
#endif
+#define TCG_TARGET_HAS_tst 1
+
#define TCG_TARGET_HAS_v64 1
#define TCG_TARGET_HAS_v128 1
#define TCG_TARGET_HAS_v256 0
diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc
index a9aa8aa..ffd23ef 100644
--- a/tcg/arm/tcg-target.c.inc
+++ b/tcg/arm/tcg-target.c.inc
@@ -501,7 +501,8 @@ static bool is_shimm1632(uint32_t v32, int *cmode, int *imm8)
* mov operand2: values represented with x << (2 * y), x < 0x100
* add, sub, eor...: ditto
*/
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -1190,6 +1191,33 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
}
}
+static TCGCond tcg_out_cmp(TCGContext *s, TCGCond cond, TCGReg a,
+ TCGArg b, int b_const)
+{
+ if (!is_tst_cond(cond)) {
+ tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0, a, b, b_const);
+ return cond;
+ }
+
+ cond = tcg_tst_eqne_cond(cond);
+ if (b_const) {
+ int imm12 = encode_imm(b);
+
+ /*
+ * The compare constraints allow rIN, but TST does not support N.
+ * Be prepared to load the constant into a scratch register.
+ */
+ if (imm12 >= 0) {
+ tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, a, imm12);
+ return cond;
+ }
+ tcg_out_movi32(s, COND_AL, TCG_REG_TMP, b);
+ b = TCG_REG_TMP;
+ }
+ tcg_out_dat_reg(s, COND_AL, ARITH_TST, 0, a, b, SHIFT_IMM_LSL(0));
+ return cond;
+}
+
static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
const int *const_args)
{
@@ -1217,6 +1245,13 @@ static TCGCond tcg_out_cmp2(TCGContext *s, const TCGArg *args,
tcg_out_dat_rI(s, COND_EQ, ARITH_CMP, 0, al, bl, const_bl);
return cond;
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ /* Similar, but with TST instead of CMP. */
+ tcg_out_dat_rI(s, COND_AL, ARITH_TST, 0, ah, bh, const_bh);
+ tcg_out_dat_rI(s, COND_EQ, ARITH_TST, 0, al, bl, const_bl);
+ return tcg_tst_eqne_cond(cond);
+
case TCG_COND_LT:
case TCG_COND_GE:
/* We perform a double-word subtraction and examine the result.
@@ -1808,9 +1843,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
/* Constraints mean that v2 is always in the same register as dest,
* so we only need to do "if condition passed, move v1 to dest".
*/
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
- args[1], args[2], const_args[2]);
- tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
+ c = tcg_out_cmp(s, args[5], args[1], args[2], const_args[2]);
+ tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[c], ARITH_MOV,
ARITH_MVN, args[0], 0, args[3], const_args[3]);
break;
case INDEX_op_add_i32:
@@ -1960,25 +1994,21 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_brcond_i32:
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
- args[0], args[1], const_args[1]);
- tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]],
- arg_label(args[3]));
+ c = tcg_out_cmp(s, args[2], args[0], args[1], const_args[1]);
+ tcg_out_goto_label(s, tcg_cond_to_arm_cond[c], arg_label(args[3]));
break;
case INDEX_op_setcond_i32:
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
- args[1], args[2], const_args[2]);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
+ c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
ARITH_MOV, args[0], 0, 1);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
ARITH_MOV, args[0], 0, 0);
break;
case INDEX_op_negsetcond_i32:
- tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
- args[1], args[2], const_args[2]);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
+ c = tcg_out_cmp(s, args[3], args[1], args[2], const_args[2]);
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[c],
ARITH_MVN, args[0], 0, 0);
- tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
+ tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(c)],
ARITH_MOV, args[0], 0, 0);
break;
diff --git a/tcg/arm/tcg-target.h b/tcg/arm/tcg-target.h
index a712cc8..a43875c 100644
--- a/tcg/arm/tcg-target.h
+++ b/tcg/arm/tcg-target.h
@@ -125,6 +125,8 @@ extern bool use_neon_instructions;
#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 1
+
#define TCG_TARGET_HAS_v64 use_neon_instructions
#define TCG_TARGET_HAS_v128 use_neon_instructions
#define TCG_TARGET_HAS_v256 0
diff --git a/tcg/i386/tcg-target-con-set.h b/tcg/i386/tcg-target-con-set.h
index 7d00a7d..e24241c 100644
--- a/tcg/i386/tcg-target-con-set.h
+++ b/tcg/i386/tcg-target-con-set.h
@@ -20,7 +20,7 @@ C_O0_I2(L, L)
C_O0_I2(qi, r)
C_O0_I2(re, r)
C_O0_I2(ri, r)
-C_O0_I2(r, re)
+C_O0_I2(r, reT)
C_O0_I2(s, L)
C_O0_I2(x, r)
C_O0_I3(L, L, L)
@@ -34,7 +34,7 @@ C_O1_I1(r, r)
C_O1_I1(x, r)
C_O1_I1(x, x)
C_O1_I2(q, 0, qi)
-C_O1_I2(q, r, re)
+C_O1_I2(q, r, reT)
C_O1_I2(r, 0, ci)
C_O1_I2(r, 0, r)
C_O1_I2(r, 0, re)
@@ -50,7 +50,7 @@ C_N1_I2(r, r, r)
C_N1_I2(r, r, rW)
C_O1_I3(x, 0, x, x)
C_O1_I3(x, x, x, x)
-C_O1_I4(r, r, re, r, 0)
+C_O1_I4(r, r, reT, r, 0)
C_O1_I4(r, r, r, ri, ri)
C_O2_I1(r, r, L)
C_O2_I2(a, d, a, r)
diff --git a/tcg/i386/tcg-target-con-str.h b/tcg/i386/tcg-target-con-str.h
index 95a30e5..cc22db2 100644
--- a/tcg/i386/tcg-target-con-str.h
+++ b/tcg/i386/tcg-target-con-str.h
@@ -28,5 +28,6 @@ REGS('s', ALL_BYTEL_REGS & ~SOFTMMU_RESERVE_REGS) /* qemu_st8_i32 data */
*/
CONST('e', TCG_CT_CONST_S32)
CONST('I', TCG_CT_CONST_I32)
+CONST('T', TCG_CT_CONST_TST)
CONST('W', TCG_CT_CONST_WSZ)
CONST('Z', TCG_CT_CONST_U32)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index d268199..c6ba498 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -132,6 +132,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define TCG_CT_CONST_U32 0x200
#define TCG_CT_CONST_I32 0x400
#define TCG_CT_CONST_WSZ 0x800
+#define TCG_CT_CONST_TST 0x1000
/* Registers used with L constraint, which are the first argument
registers on x86_64, and two random call clobbered registers on
@@ -195,13 +196,15 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
}
if (type == TCG_TYPE_I32) {
- if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 | TCG_CT_CONST_I32)) {
+ if (ct & (TCG_CT_CONST_S32 | TCG_CT_CONST_U32 |
+ TCG_CT_CONST_I32 | TCG_CT_CONST_TST)) {
return 1;
}
} else {
@@ -214,6 +217,17 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
return 1;
}
+ /*
+ * This will be used in combination with TCG_CT_CONST_S32,
+ * so "normal" TESTQ is already matched. Also accept:
+ * TESTQ -> TESTL (uint32_t)
+ * TESTQ -> BT (is_power_of_2)
+ */
+ if ((ct & TCG_CT_CONST_TST)
+ && is_tst_cond(cond)
+ && (val == (uint32_t)val || is_power_of_2(val))) {
+ return 1;
+ }
}
if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
return 1;
@@ -395,6 +409,7 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_SHRD_Ib (0xac | P_EXT)
+#define OPC_TESTB (0x84)
#define OPC_TESTL (0x85)
#define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3)
#define OPC_UD2 (0x0b | P_EXT)
@@ -441,6 +456,12 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
#define OPC_GRP3_Ev (0xf7)
#define OPC_GRP5 (0xff)
#define OPC_GRP14 (0x73 | P_EXT | P_DATA16)
+#define OPC_GRPBT (0xba | P_EXT)
+
+#define OPC_GRPBT_BT 4
+#define OPC_GRPBT_BTS 5
+#define OPC_GRPBT_BTR 6
+#define OPC_GRPBT_BTC 7
/* Group 1 opcode extensions for 0x80-0x83.
These are also used as modifiers for OPC_ARITH. */
@@ -505,6 +526,8 @@ static const uint8_t tcg_cond_to_jcc[] = {
[TCG_COND_GEU] = JCC_JAE,
[TCG_COND_LEU] = JCC_JBE,
[TCG_COND_GTU] = JCC_JA,
+ [TCG_COND_TSTEQ] = JCC_JE,
+ [TCG_COND_TSTNE] = JCC_JNE,
};
#if TCG_TARGET_REG_BITS == 64
@@ -1448,27 +1471,101 @@ static void tcg_out_jxx(TCGContext *s, int opc, TCGLabel *l, bool small)
}
}
-static void tcg_out_cmp(TCGContext *s, TCGArg arg1, TCGArg arg2,
- int const_arg2, int rexw)
+static int tcg_out_cmp(TCGContext *s, TCGCond cond, TCGArg arg1,
+ TCGArg arg2, int const_arg2, int rexw)
{
- if (const_arg2) {
- if (arg2 == 0) {
- /* test r, r */
+ int jz, js;
+
+ if (!is_tst_cond(cond)) {
+ if (!const_arg2) {
+ tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
+ } else if (arg2 == 0) {
tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg1);
} else {
+ tcg_debug_assert(!rexw || arg2 == (int32_t)arg2);
tgen_arithi(s, ARITH_CMP + rexw, arg1, arg2, 0);
}
- } else {
- tgen_arithr(s, ARITH_CMP + rexw, arg1, arg2);
+ return tcg_cond_to_jcc[cond];
+ }
+
+ jz = tcg_cond_to_jcc[cond];
+ js = (cond == TCG_COND_TSTNE ? JCC_JS : JCC_JNS);
+
+ if (!const_arg2) {
+ tcg_out_modrm(s, OPC_TESTL + rexw, arg1, arg2);
+ return jz;
+ }
+
+ if (arg2 <= 0xff && (TCG_TARGET_REG_BITS == 64 || arg1 < 4)) {
+ if (arg2 == 0x80) {
+ tcg_out_modrm(s, OPC_TESTB | P_REXB_R, arg1, arg1);
+ return js;
+ }
+ if (arg2 == 0xff) {
+ tcg_out_modrm(s, OPC_TESTB | P_REXB_R, arg1, arg1);
+ return jz;
+ }
+ tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, arg1);
+ tcg_out8(s, arg2);
+ return jz;
+ }
+
+ if ((arg2 & ~0xff00) == 0 && arg1 < 4) {
+ if (arg2 == 0x8000) {
+ tcg_out_modrm(s, OPC_TESTB, arg1 + 4, arg1 + 4);
+ return js;
+ }
+ if (arg2 == 0xff00) {
+ tcg_out_modrm(s, OPC_TESTB, arg1 + 4, arg1 + 4);
+ return jz;
+ }
+ tcg_out_modrm(s, OPC_GRP3_Eb, EXT3_TESTi, arg1 + 4);
+ tcg_out8(s, arg2 >> 8);
+ return jz;
+ }
+
+ if (arg2 == 0xffff) {
+ tcg_out_modrm(s, OPC_TESTL | P_DATA16, arg1, arg1);
+ return jz;
}
+ if (arg2 == 0xffffffffu) {
+ tcg_out_modrm(s, OPC_TESTL, arg1, arg1);
+ return jz;
+ }
+
+ if (is_power_of_2(rexw ? arg2 : (uint32_t)arg2)) {
+ int jc = (cond == TCG_COND_TSTNE ? JCC_JB : JCC_JAE);
+ int sh = ctz64(arg2);
+
+ rexw = (sh & 32 ? P_REXW : 0);
+ if ((sh & 31) == 31) {
+ tcg_out_modrm(s, OPC_TESTL | rexw, arg1, arg1);
+ return js;
+ } else {
+ tcg_out_modrm(s, OPC_GRPBT | rexw, OPC_GRPBT_BT, arg1);
+ tcg_out8(s, sh);
+ return jc;
+ }
+ }
+
+ if (rexw) {
+ if (arg2 == (uint32_t)arg2) {
+ rexw = 0;
+ } else {
+ tcg_debug_assert(arg2 == (int32_t)arg2);
+ }
+ }
+ tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_TESTi, arg1);
+ tcg_out32(s, arg2);
+ return jz;
}
static void tcg_out_brcond(TCGContext *s, int rexw, TCGCond cond,
TCGArg arg1, TCGArg arg2, int const_arg2,
TCGLabel *label, bool small)
{
- tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
- tcg_out_jxx(s, tcg_cond_to_jcc[cond], label, small);
+ int jcc = tcg_out_cmp(s, cond, arg1, arg2, const_arg2, rexw);
+ tcg_out_jxx(s, jcc, label, small);
}
#if TCG_TARGET_REG_BITS == 32
@@ -1477,18 +1574,21 @@ static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
{
TCGLabel *label_next = gen_new_label();
TCGLabel *label_this = arg_label(args[5]);
+ TCGCond cond = args[4];
- switch(args[4]) {
+ switch (cond) {
case TCG_COND_EQ:
- tcg_out_brcond(s, 0, TCG_COND_NE, args[0], args[2], const_args[2],
- label_next, 1);
- tcg_out_brcond(s, 0, TCG_COND_EQ, args[1], args[3], const_args[3],
+ case TCG_COND_TSTEQ:
+ tcg_out_brcond(s, 0, tcg_invert_cond(cond),
+ args[0], args[2], const_args[2], label_next, 1);
+ tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
label_this, small);
break;
case TCG_COND_NE:
- tcg_out_brcond(s, 0, TCG_COND_NE, args[0], args[2], const_args[2],
+ case TCG_COND_TSTNE:
+ tcg_out_brcond(s, 0, cond, args[0], args[2], const_args[2],
label_this, small);
- tcg_out_brcond(s, 0, TCG_COND_NE, args[1], args[3], const_args[3],
+ tcg_out_brcond(s, 0, cond, args[1], args[3], const_args[3],
label_this, small);
break;
case TCG_COND_LT:
@@ -1560,6 +1660,7 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
{
bool inv = false;
bool cleared;
+ int jcc;
switch (cond) {
case TCG_COND_NE:
@@ -1596,7 +1697,7 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
* We can then use NEG or INC to produce the desired result.
* This is always smaller than the SETCC expansion.
*/
- tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
+ tcg_out_cmp(s, TCG_COND_LTU, arg1, arg2, const_arg2, rexw);
/* X - X - C = -C = (C ? -1 : 0) */
tgen_arithr(s, ARITH_SBB + (neg ? rexw : 0), dest, dest);
@@ -1643,8 +1744,8 @@ static void tcg_out_setcond(TCGContext *s, int rexw, TCGCond cond,
cleared = true;
}
- tcg_out_cmp(s, arg1, arg2, const_arg2, rexw);
- tcg_out_modrm(s, OPC_SETCC | tcg_cond_to_jcc[cond], 0, dest);
+ jcc = tcg_out_cmp(s, cond, arg1, arg2, const_arg2, rexw);
+ tcg_out_modrm(s, OPC_SETCC | jcc, 0, dest);
if (!cleared) {
tcg_out_ext8u(s, dest, dest);
@@ -1698,14 +1799,14 @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
}
#endif
-static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
+static void tcg_out_cmov(TCGContext *s, int jcc, int rexw,
TCGReg dest, TCGReg v1)
{
if (have_cmov) {
- tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
+ tcg_out_modrm(s, OPC_CMOVCC | jcc | rexw, dest, v1);
} else {
TCGLabel *over = gen_new_label();
- tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
+ tcg_out_jxx(s, jcc ^ 1, over, 1);
tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
tcg_out_label(s, over);
}
@@ -1715,8 +1816,8 @@ static void tcg_out_movcond(TCGContext *s, int rexw, TCGCond cond,
TCGReg dest, TCGReg c1, TCGArg c2, int const_c2,
TCGReg v1)
{
- tcg_out_cmp(s, c1, c2, const_c2, rexw);
- tcg_out_cmov(s, cond, rexw, dest, v1);
+ int jcc = tcg_out_cmp(s, cond, c1, c2, const_c2, rexw);
+ tcg_out_cmov(s, jcc, rexw, dest, v1);
}
static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
@@ -1728,12 +1829,12 @@ static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
tcg_debug_assert(arg2 == (rexw ? 64 : 32));
} else {
tcg_debug_assert(dest != arg2);
- tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
+ tcg_out_cmov(s, JCC_JB, rexw, dest, arg2);
}
} else {
tcg_debug_assert(dest != arg2);
tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
- tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
+ tcg_out_cmov(s, JCC_JE, rexw, dest, arg2);
}
}
@@ -1746,7 +1847,7 @@ static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
tcg_debug_assert(arg2 == (rexw ? 64 : 32));
} else {
tcg_debug_assert(dest != arg2);
- tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
+ tcg_out_cmov(s, JCC_JB, rexw, dest, arg2);
}
} else {
tcg_debug_assert(!const_a2);
@@ -1758,8 +1859,8 @@ static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
tgen_arithi(s, ARITH_XOR + rexw, dest, rexw ? 63 : 31, 0);
/* Since we have destroyed the flags from BSR, we have to re-test. */
- tcg_out_cmp(s, arg1, 0, 1, rexw);
- tcg_out_cmov(s, TCG_COND_EQ, rexw, dest, arg2);
+ int jcc = tcg_out_cmp(s, TCG_COND_EQ, arg1, 0, 1, rexw);
+ tcg_out_cmov(s, jcc, rexw, dest, arg2);
}
}
@@ -1824,23 +1925,6 @@ static void tcg_out_nopn(TCGContext *s, int n)
tcg_out8(s, 0x90);
}
-/* Test register R vs immediate bits I, setting Z flag for EQ/NE. */
-static void __attribute__((unused))
-tcg_out_testi(TCGContext *s, TCGReg r, uint32_t i)
-{
- /*
- * This is used for testing alignment, so we can usually use testb.
- * For i686, we have to use testl for %esi/%edi.
- */
- if (i <= 0xff && (TCG_TARGET_REG_BITS == 64 || r < 4)) {
- tcg_out_modrm(s, OPC_GRP3_Eb | P_REXB_RM, EXT3_TESTi, r);
- tcg_out8(s, i);
- } else {
- tcg_out_modrm(s, OPC_GRP3_Ev, EXT3_TESTi, r);
- tcg_out32(s, i);
- }
-}
-
typedef struct {
TCGReg base;
int index;
@@ -2101,16 +2185,17 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
offsetof(CPUTLBEntry, addend));
} else if (a_mask) {
- ldst = new_ldst_label(s);
+ int jcc;
+ ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
ldst->oi = oi;
ldst->addrlo_reg = addrlo;
ldst->addrhi_reg = addrhi;
- tcg_out_testi(s, addrlo, a_mask);
/* jne slow_path */
- tcg_out_opc(s, OPC_JCC_long + JCC_JNE, 0, 0, 0);
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, addrlo, a_mask, true, false);
+ tcg_out_opc(s, OPC_JCC_long + jcc, 0, 0, 0);
ldst->label_ptr[0] = s->code_ptr;
s->code_ptr += 4;
}
@@ -2256,9 +2341,10 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
} else {
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
+ int jcc;
- tcg_out_testi(s, h.base, 15);
- tcg_out_jxx(s, JCC_JNE, l1, true);
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, h.base, 15, true, false);
+ tcg_out_jxx(s, jcc, l1, true);
tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_VxWx + h.seg,
TCG_TMP_VEC, 0,
@@ -2384,9 +2470,10 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
} else {
TCGLabel *l1 = gen_new_label();
TCGLabel *l2 = gen_new_label();
+ int jcc;
- tcg_out_testi(s, h.base, 15);
- tcg_out_jxx(s, JCC_JNE, l1, true);
+ jcc = tcg_out_cmp(s, TCG_COND_TSTNE, h.base, 15, true, false);
+ tcg_out_jxx(s, jcc, l1, true);
tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_WxVx + h.seg,
TCG_TMP_VEC, 0,
@@ -3373,7 +3460,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
- return C_O0_I2(r, re);
+ return C_O0_I2(r, reT);
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
@@ -3421,11 +3508,11 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_setcond_i64:
case INDEX_op_negsetcond_i32:
case INDEX_op_negsetcond_i64:
- return C_O1_I2(q, r, re);
+ return C_O1_I2(q, r, reT);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, re, r, 0);
+ return C_O1_I4(r, r, reT, r, 0);
case INDEX_op_div2_i32:
case INDEX_op_div2_i64:
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index fa34dee..a10d4e1 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -198,6 +198,8 @@ typedef enum {
#define TCG_TARGET_HAS_qemu_ldst_i128 \
(TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
+#define TCG_TARGET_HAS_tst 1
+
/* We do not support older SSE systems, only beginning with AVX1. */
#define TCG_TARGET_HAS_v64 have_avx1
#define TCG_TARGET_HAS_v128 have_avx1
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index dcf0205..69c5b8a 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -183,7 +183,8 @@ static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
if (ct & TCG_CT_CONST) {
return true;
diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h
index 9c70ebf..fede627 100644
--- a/tcg/loongarch64/tcg-target.h
+++ b/tcg/loongarch64/tcg-target.h
@@ -169,6 +169,8 @@ typedef enum {
#define TCG_TARGET_HAS_qemu_ldst_i128 (cpuinfo & CPUINFO_LSX)
+#define TCG_TARGET_HAS_tst 0
+
#define TCG_TARGET_HAS_v64 0
#define TCG_TARGET_HAS_v128 (cpuinfo & CPUINFO_LSX)
#define TCG_TARGET_HAS_v256 0
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index 8328dbd..3b5b5c6 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -188,7 +188,8 @@ static bool is_p2m1(tcg_target_long val)
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index b98ffae..a996aa1 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -194,6 +194,8 @@ extern bool use_mips32r2_instructions;
#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 0
+
#define TCG_TARGET_DEFAULT_MO 0
#define TCG_TARGET_NEED_LDST_LABELS
#define TCG_TARGET_NEED_POOL_LABELS
diff --git a/tcg/optimize.c b/tcg/optimize.c
index f2d0165..79e7016 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -124,11 +124,22 @@ static inline bool ts_is_const(TCGTemp *ts)
return ts_info(ts)->is_const;
}
+static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
+{
+ TempOptInfo *ti = ts_info(ts);
+ return ti->is_const && ti->val == val;
+}
+
static inline bool arg_is_const(TCGArg arg)
{
return ts_is_const(arg_temp(arg));
}
+static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
+{
+ return ts_is_const_val(arg_temp(arg), val);
+}
+
static inline bool ts_is_copy(TCGTemp *ts)
{
return ts_info(ts)->next_copy != ts;
@@ -353,6 +364,13 @@ static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
return temp_arg(ts);
}
+static TCGArg arg_new_temp(OptContext *ctx)
+{
+ TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
+ init_ts_info(ctx, ts);
+ return temp_arg(ts);
+}
+
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
TCGTemp *dst_ts = arg_temp(dst);
@@ -614,9 +632,15 @@ static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
return x <= y;
case TCG_COND_GTU:
return x > y;
- default:
- g_assert_not_reached();
+ case TCG_COND_TSTEQ:
+ return (x & y) == 0;
+ case TCG_COND_TSTNE:
+ return (x & y) != 0;
+ case TCG_COND_ALWAYS:
+ case TCG_COND_NEVER:
+ break;
}
+ g_assert_not_reached();
}
static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
@@ -642,12 +666,18 @@ static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
return x <= y;
case TCG_COND_GTU:
return x > y;
- default:
- g_assert_not_reached();
+ case TCG_COND_TSTEQ:
+ return (x & y) == 0;
+ case TCG_COND_TSTNE:
+ return (x & y) != 0;
+ case TCG_COND_ALWAYS:
+ case TCG_COND_NEVER:
+ break;
}
+ g_assert_not_reached();
}
-static bool do_constant_folding_cond_eq(TCGCond c)
+static int do_constant_folding_cond_eq(TCGCond c)
{
switch (c) {
case TCG_COND_GT:
@@ -662,9 +692,14 @@ static bool do_constant_folding_cond_eq(TCGCond c)
case TCG_COND_LEU:
case TCG_COND_EQ:
return 1;
- default:
- g_assert_not_reached();
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ return -1;
+ case TCG_COND_ALWAYS:
+ case TCG_COND_NEVER:
+ break;
}
+ g_assert_not_reached();
}
/*
@@ -689,11 +724,13 @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
}
} else if (args_are_copies(x, y)) {
return do_constant_folding_cond_eq(c);
- } else if (arg_is_const(y) && arg_info(y)->val == 0) {
+ } else if (arg_is_const_val(y, 0)) {
switch (c) {
case TCG_COND_LTU:
+ case TCG_COND_TSTNE:
return 0;
case TCG_COND_GEU:
+ case TCG_COND_TSTEQ:
return 1;
default:
return -1;
@@ -702,43 +739,6 @@ static int do_constant_folding_cond(TCGType type, TCGArg x,
return -1;
}
-/*
- * Return -1 if the condition can't be simplified,
- * and the result of the condition (0 or 1) if it can.
- */
-static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
-{
- TCGArg al = p1[0], ah = p1[1];
- TCGArg bl = p2[0], bh = p2[1];
-
- if (arg_is_const(bl) && arg_is_const(bh)) {
- tcg_target_ulong blv = arg_info(bl)->val;
- tcg_target_ulong bhv = arg_info(bh)->val;
- uint64_t b = deposit64(blv, 32, 32, bhv);
-
- if (arg_is_const(al) && arg_is_const(ah)) {
- tcg_target_ulong alv = arg_info(al)->val;
- tcg_target_ulong ahv = arg_info(ah)->val;
- uint64_t a = deposit64(alv, 32, 32, ahv);
- return do_constant_folding_cond_64(a, b, c);
- }
- if (b == 0) {
- switch (c) {
- case TCG_COND_LTU:
- return 0;
- case TCG_COND_GEU:
- return 1;
- default:
- break;
- }
- }
- }
- if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
- return do_constant_folding_cond_eq(c);
- }
- return -1;
-}
-
/**
* swap_commutative:
* @dest: TCGArg of the destination argument, or NO_DEST.
@@ -785,6 +785,166 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
return false;
}
+/*
+ * Return -1 if the condition can't be simplified,
+ * and the result of the condition (0 or 1) if it can.
+ */
+static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
+ TCGArg *p1, TCGArg *p2, TCGArg *pcond)
+{
+ TCGCond cond;
+ bool swap;
+ int r;
+
+ swap = swap_commutative(dest, p1, p2);
+ cond = *pcond;
+ if (swap) {
+ *pcond = cond = tcg_swap_cond(cond);
+ }
+
+ r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
+ if (r >= 0) {
+ return r;
+ }
+ if (!is_tst_cond(cond)) {
+ return -1;
+ }
+
+ /*
+ * TSTNE x,x -> NE x,0
+ * TSTNE x,-1 -> NE x,0
+ */
+ if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) {
+ *p2 = arg_new_constant(ctx, 0);
+ *pcond = tcg_tst_eqne_cond(cond);
+ return -1;
+ }
+
+ /* TSTNE x,sign -> LT x,0 */
+ if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32
+ ? INT32_MIN : INT64_MIN))) {
+ *p2 = arg_new_constant(ctx, 0);
+ *pcond = tcg_tst_ltge_cond(cond);
+ return -1;
+ }
+
+ /* Expand to AND with a temporary if no backend support. */
+ if (!TCG_TARGET_HAS_tst) {
+ TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
+ ? INDEX_op_and_i32 : INDEX_op_and_i64);
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
+ TCGArg tmp = arg_new_temp(ctx);
+
+ op2->args[0] = tmp;
+ op2->args[1] = *p1;
+ op2->args[2] = *p2;
+
+ *p1 = tmp;
+ *p2 = arg_new_constant(ctx, 0);
+ *pcond = tcg_tst_eqne_cond(cond);
+ }
+ return -1;
+}
+
+static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
+{
+ TCGArg al, ah, bl, bh;
+ TCGCond c;
+ bool swap;
+ int r;
+
+ swap = swap_commutative2(args, args + 2);
+ c = args[4];
+ if (swap) {
+ args[4] = c = tcg_swap_cond(c);
+ }
+
+ al = args[0];
+ ah = args[1];
+ bl = args[2];
+ bh = args[3];
+
+ if (arg_is_const(bl) && arg_is_const(bh)) {
+ tcg_target_ulong blv = arg_info(bl)->val;
+ tcg_target_ulong bhv = arg_info(bh)->val;
+ uint64_t b = deposit64(blv, 32, 32, bhv);
+
+ if (arg_is_const(al) && arg_is_const(ah)) {
+ tcg_target_ulong alv = arg_info(al)->val;
+ tcg_target_ulong ahv = arg_info(ah)->val;
+ uint64_t a = deposit64(alv, 32, 32, ahv);
+
+ r = do_constant_folding_cond_64(a, b, c);
+ if (r >= 0) {
+ return r;
+ }
+ }
+
+ if (b == 0) {
+ switch (c) {
+ case TCG_COND_LTU:
+ case TCG_COND_TSTNE:
+ return 0;
+ case TCG_COND_GEU:
+ case TCG_COND_TSTEQ:
+ return 1;
+ default:
+ break;
+ }
+ }
+
+ /* TSTNE x,-1 -> NE x,0 */
+ if (b == -1 && is_tst_cond(c)) {
+ args[3] = args[2] = arg_new_constant(ctx, 0);
+ args[4] = tcg_tst_eqne_cond(c);
+ return -1;
+ }
+
+ /* TSTNE x,sign -> LT x,0 */
+ if (b == INT64_MIN && is_tst_cond(c)) {
+ /* bl must be 0, so copy that to bh */
+ args[3] = bl;
+ args[4] = tcg_tst_ltge_cond(c);
+ return -1;
+ }
+ }
+
+ if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
+ r = do_constant_folding_cond_eq(c);
+ if (r >= 0) {
+ return r;
+ }
+
+ /* TSTNE x,x -> NE x,0 */
+ if (is_tst_cond(c)) {
+ args[3] = args[2] = arg_new_constant(ctx, 0);
+ args[4] = tcg_tst_eqne_cond(c);
+ return -1;
+ }
+ }
+
+ /* Expand to AND with a temporary if no backend support. */
+ if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
+ TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
+ TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
+ TCGArg t1 = arg_new_temp(ctx);
+ TCGArg t2 = arg_new_temp(ctx);
+
+ op1->args[0] = t1;
+ op1->args[1] = al;
+ op1->args[2] = bl;
+ op2->args[0] = t2;
+ op2->args[1] = ah;
+ op2->args[2] = bh;
+
+ args[0] = t1;
+ args[1] = t2;
+ args[3] = args[2] = arg_new_constant(ctx, 0);
+ args[4] = tcg_tst_eqne_cond(c);
+ }
+ return -1;
+}
+
static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
for (int i = 0; i < nb_args; i++) {
@@ -954,7 +1114,7 @@ static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
- if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+ if (arg_is_const_val(op->args[1], i)) {
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
return false;
@@ -963,7 +1123,7 @@ static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
- if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+ if (arg_is_const_val(op->args[1], i)) {
return fold_to_not(ctx, op, 2);
}
return false;
@@ -972,7 +1132,7 @@ static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
- if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+ if (arg_is_const_val(op->args[2], i)) {
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
return false;
@@ -981,7 +1141,7 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
- if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+ if (arg_is_const_val(op->args[2], i)) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}
return false;
@@ -990,7 +1150,7 @@ static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
- if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+ if (arg_is_const_val(op->args[2], i)) {
return fold_to_not(ctx, op, 1);
}
return false;
@@ -1182,14 +1342,8 @@ static bool fold_andc(OptContext *ctx, TCGOp *op)
static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
- TCGCond cond = op->args[2];
- int i;
-
- if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
- op->args[2] = cond = tcg_swap_cond(cond);
- }
-
- i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
+ int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
+ &op->args[1], &op->args[2]);
if (i == 0) {
tcg_op_remove(ctx->tcg, op);
return true;
@@ -1203,15 +1357,13 @@ static bool fold_brcond(OptContext *ctx, TCGOp *op)
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
- TCGCond cond = op->args[4];
- TCGArg label = op->args[5];
+ TCGCond cond;
+ TCGArg label;
int i, inv = 0;
- if (swap_commutative2(&op->args[0], &op->args[2])) {
- op->args[4] = cond = tcg_swap_cond(cond);
- }
-
- i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
+ i = do_constant_folding_cond2(ctx, op, &op->args[0]);
+ cond = op->args[4];
+ label = op->args[5];
if (i >= 0) {
goto do_brcond_const;
}
@@ -1223,8 +1375,8 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
* Simplify LT/GE comparisons vs zero to a single compare
* vs the high word of the input.
*/
- if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
- arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
+ if (arg_is_const_val(op->args[2], 0) &&
+ arg_is_const_val(op->args[3], 0)) {
goto do_brcond_high;
}
break;
@@ -1252,24 +1404,37 @@ static bool fold_brcond2(OptContext *ctx, TCGOp *op)
case 0:
goto do_brcond_const;
case 1:
- op->opc = INDEX_op_brcond_i32;
- op->args[1] = op->args[2];
- op->args[2] = cond;
- op->args[3] = label;
- break;
+ goto do_brcond_low;
+ }
+ break;
+
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ if (arg_is_const_val(op->args[2], 0)) {
+ goto do_brcond_high;
+ }
+ if (arg_is_const_val(op->args[3], 0)) {
+ goto do_brcond_low;
}
break;
default:
break;
+ do_brcond_low:
+ op->opc = INDEX_op_brcond_i32;
+ op->args[1] = op->args[2];
+ op->args[2] = cond;
+ op->args[3] = label;
+ return fold_brcond(ctx, op);
+
do_brcond_high:
op->opc = INDEX_op_brcond_i32;
op->args[0] = op->args[1];
op->args[1] = op->args[3];
op->args[2] = cond;
op->args[3] = label;
- break;
+ return fold_brcond(ctx, op);
do_brcond_const:
if (i == 0) {
@@ -1448,9 +1613,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
}
/* Inserting a value into zero at offset 0. */
- if (arg_is_const(op->args[1])
- && arg_info(op->args[1])->val == 0
- && op->args[3] == 0) {
+ if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
op->opc = and_opc;
@@ -1461,8 +1624,7 @@ static bool fold_deposit(OptContext *ctx, TCGOp *op)
}
/* Inserting zero into a value. */
- if (arg_is_const(op->args[2])
- && arg_info(op->args[2])->val == 0) {
+ if (arg_is_const_val(op->args[2], 0)) {
uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
op->opc = and_opc;
@@ -1687,21 +1849,18 @@ static bool fold_mov(OptContext *ctx, TCGOp *op)
static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
- TCGCond cond = op->args[5];
int i;
- if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
- op->args[5] = cond = tcg_swap_cond(cond);
- }
/*
* Canonicalize the "false" input reg to match the destination reg so
* that the tcg backend can implement a "move if true" operation.
*/
if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
- op->args[5] = cond = tcg_invert_cond(cond);
+ op->args[5] = tcg_invert_cond(op->args[5]);
}
- i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
+ i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
+ &op->args[2], &op->args[5]);
if (i >= 0) {
return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
}
@@ -1715,6 +1874,7 @@ static bool fold_movcond(OptContext *ctx, TCGOp *op)
uint64_t tv = arg_info(op->args[3])->val;
uint64_t fv = arg_info(op->args[4])->val;
TCGOpcode opc, negopc = 0;
+ TCGCond cond = op->args[5];
switch (ctx->type) {
case TCG_TYPE_I32:
@@ -1940,19 +2100,107 @@ static bool fold_remainder(OptContext *ctx, TCGOp *op)
return false;
}
-static bool fold_setcond(OptContext *ctx, TCGOp *op)
+static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
{
+ TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc, uext_opc, sext_opc;
TCGCond cond = op->args[3];
- int i;
+ TCGArg ret, src1, src2;
+ TCGOp *op2;
+ uint64_t val;
+ int sh;
+ bool inv;
+
+ if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
+ return;
+ }
+
+ src2 = op->args[2];
+ val = arg_info(src2)->val;
+ if (!is_power_of_2(val)) {
+ return;
+ }
+ sh = ctz64(val);
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
- op->args[3] = cond = tcg_swap_cond(cond);
+ switch (ctx->type) {
+ case TCG_TYPE_I32:
+ and_opc = INDEX_op_and_i32;
+ sub_opc = INDEX_op_sub_i32;
+ xor_opc = INDEX_op_xor_i32;
+ shr_opc = INDEX_op_shr_i32;
+ neg_opc = INDEX_op_neg_i32;
+ if (TCG_TARGET_extract_i32_valid(sh, 1)) {
+ uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
+ sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
+ }
+ break;
+ case TCG_TYPE_I64:
+ and_opc = INDEX_op_and_i64;
+ sub_opc = INDEX_op_sub_i64;
+ xor_opc = INDEX_op_xor_i64;
+ shr_opc = INDEX_op_shr_i64;
+ neg_opc = INDEX_op_neg_i64;
+ if (TCG_TARGET_extract_i64_valid(sh, 1)) {
+ uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
+ sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ ret = op->args[0];
+ src1 = op->args[1];
+ inv = cond == TCG_COND_TSTEQ;
+
+ if (sh && sext_opc && neg && !inv) {
+ op->opc = sext_opc;
+ op->args[1] = src1;
+ op->args[2] = sh;
+ op->args[3] = 1;
+ return;
+ } else if (sh && uext_opc) {
+ op->opc = uext_opc;
+ op->args[1] = src1;
+ op->args[2] = sh;
+ op->args[3] = 1;
+ } else {
+ if (sh) {
+ op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3);
+ op2->args[0] = ret;
+ op2->args[1] = src1;
+ op2->args[2] = arg_new_constant(ctx, sh);
+ src1 = ret;
+ }
+ op->opc = and_opc;
+ op->args[1] = src1;
+ op->args[2] = arg_new_constant(ctx, 1);
}
- i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
+ if (neg && inv) {
+ op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3);
+ op2->args[0] = ret;
+ op2->args[1] = ret;
+ op2->args[2] = arg_new_constant(ctx, 1);
+ } else if (inv) {
+ op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3);
+ op2->args[0] = ret;
+ op2->args[1] = ret;
+ op2->args[2] = arg_new_constant(ctx, 1);
+ } else if (neg) {
+ op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2);
+ op2->args[0] = ret;
+ op2->args[1] = ret;
+ }
+}
+
+static bool fold_setcond(OptContext *ctx, TCGOp *op)
+{
+ int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
+ &op->args[2], &op->args[3]);
if (i >= 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
+ fold_setcond_tst_pow2(ctx, op, false);
ctx->z_mask = 1;
ctx->s_mask = smask_from_zmask(1);
@@ -1961,34 +2209,25 @@ static bool fold_setcond(OptContext *ctx, TCGOp *op)
static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
{
- TCGCond cond = op->args[3];
- int i;
-
- if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
- op->args[3] = cond = tcg_swap_cond(cond);
- }
-
- i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
+ int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
+ &op->args[2], &op->args[3]);
if (i >= 0) {
return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
}
+ fold_setcond_tst_pow2(ctx, op, true);
/* Value is {0,-1} so all bits are repetitions of the sign. */
ctx->s_mask = -1;
return false;
}
-
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
- TCGCond cond = op->args[5];
+ TCGCond cond;
int i, inv = 0;
- if (swap_commutative2(&op->args[1], &op->args[3])) {
- op->args[5] = cond = tcg_swap_cond(cond);
- }
-
- i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
+ i = do_constant_folding_cond2(ctx, op, &op->args[1]);
+ cond = op->args[5];
if (i >= 0) {
goto do_setcond_const;
}
@@ -2000,8 +2239,8 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
* Simplify LT/GE comparisons vs zero to a single compare
* vs the high word of the input.
*/
- if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
- arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
+ if (arg_is_const_val(op->args[3], 0) &&
+ arg_is_const_val(op->args[4], 0)) {
goto do_setcond_high;
}
break;
@@ -2029,22 +2268,35 @@ static bool fold_setcond2(OptContext *ctx, TCGOp *op)
case 0:
goto do_setcond_const;
case 1:
- op->args[2] = op->args[3];
- op->args[3] = cond;
- op->opc = INDEX_op_setcond_i32;
- break;
+ goto do_setcond_low;
+ }
+ break;
+
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ if (arg_is_const_val(op->args[2], 0)) {
+ goto do_setcond_high;
+ }
+ if (arg_is_const_val(op->args[4], 0)) {
+ goto do_setcond_low;
}
break;
default:
break;
+ do_setcond_low:
+ op->args[2] = op->args[3];
+ op->args[3] = cond;
+ op->opc = INDEX_op_setcond_i32;
+ return fold_setcond(ctx, op);
+
do_setcond_high:
op->args[1] = op->args[2];
op->args[2] = op->args[4];
op->args[3] = cond;
op->opc = INDEX_op_setcond_i32;
- break;
+ return fold_setcond(ctx, op);
}
ctx->z_mask = 1;
diff --git a/tcg/ppc/tcg-target-con-set.h b/tcg/ppc/tcg-target-con-set.h
index cb47b29..9f99bde 100644
--- a/tcg/ppc/tcg-target-con-set.h
+++ b/tcg/ppc/tcg-target-con-set.h
@@ -11,7 +11,7 @@
*/
C_O0_I1(r)
C_O0_I2(r, r)
-C_O0_I2(r, ri)
+C_O0_I2(r, rC)
C_O0_I2(v, r)
C_O0_I3(r, r, r)
C_O0_I3(o, m, r)
@@ -26,13 +26,14 @@ C_O1_I2(r, rI, ri)
C_O1_I2(r, rI, rT)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
+C_O1_I2(r, r, rC)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rT)
C_O1_I2(r, r, rU)
C_O1_I2(r, r, rZW)
C_O1_I2(v, v, v)
C_O1_I3(v, v, v, v)
-C_O1_I4(r, r, ri, rZ, rZ)
+C_O1_I4(r, r, rC, rZ, rZ)
C_O1_I4(r, r, r, ri, ri)
C_O2_I1(r, r, r)
C_N1O1_I1(o, m, r)
diff --git a/tcg/ppc/tcg-target-con-str.h b/tcg/ppc/tcg-target-con-str.h
index 2084690..16b6872 100644
--- a/tcg/ppc/tcg-target-con-str.h
+++ b/tcg/ppc/tcg-target-con-str.h
@@ -16,6 +16,7 @@ REGS('v', ALL_VECTOR_REGS)
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
+CONST('C', TCG_CT_CONST_CMP)
CONST('I', TCG_CT_CONST_S16)
CONST('M', TCG_CT_CONST_MONE)
CONST('T', TCG_CT_CONST_S32)
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 5481696..7f3829b 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -92,11 +92,13 @@
#define SZR (TCG_TARGET_REG_BITS / 8)
#define TCG_CT_CONST_S16 0x100
+#define TCG_CT_CONST_U16 0x200
#define TCG_CT_CONST_S32 0x400
#define TCG_CT_CONST_U32 0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000
#define TCG_CT_CONST_WSZ 0x4000
+#define TCG_CT_CONST_CMP 0x8000
#define ALL_GENERAL_REGS 0xffffffffu
#define ALL_VECTOR_REGS 0xffffffff00000000ull
@@ -281,31 +283,78 @@ static bool reloc_pc34(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
return false;
}
+static bool mask_operand(uint32_t c, int *mb, int *me);
+static bool mask64_operand(uint64_t c, int *mb, int *me);
+
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t sval, int ct,
+ TCGType type, TCGCond cond, int vece)
{
+ uint64_t uval = sval;
+ int mb, me;
+
if (ct & TCG_CT_CONST) {
return 1;
}
- /* The only 32-bit constraint we use aside from
- TCG_CT_CONST is TCG_CT_CONST_S16. */
if (type == TCG_TYPE_I32) {
- val = (int32_t)val;
+ uval = (uint32_t)sval;
+ sval = (int32_t)sval;
+ }
+
+ if (ct & TCG_CT_CONST_CMP) {
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ ct |= TCG_CT_CONST_S16 | TCG_CT_CONST_U16;
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ ct |= TCG_CT_CONST_S16;
+ break;
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ ct |= TCG_CT_CONST_U16;
+ break;
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ if ((uval & ~0xffff) == 0 || (uval & ~0xffff0000ull) == 0) {
+ return 1;
+ }
+ if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32
+ ? mask_operand(uval, &mb, &me)
+ : mask64_operand(uval << clz64(uval), &mb, &me)) {
+ return 1;
+ }
+ return 0;
+ default:
+ g_assert_not_reached();
+ }
}
- if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
+ if ((ct & TCG_CT_CONST_S16) && sval == (int16_t)sval) {
+ return 1;
+ }
+ if ((ct & TCG_CT_CONST_U16) && uval == (uint16_t)uval) {
return 1;
- } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
+ }
+ if ((ct & TCG_CT_CONST_S32) && sval == (int32_t)sval) {
return 1;
- } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
+ }
+ if ((ct & TCG_CT_CONST_U32) && uval == (uint32_t)uval) {
return 1;
- } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+ }
+ if ((ct & TCG_CT_CONST_ZERO) && sval == 0) {
return 1;
- } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
+ }
+ if ((ct & TCG_CT_CONST_MONE) && sval == -1) {
return 1;
- } else if ((ct & TCG_CT_CONST_WSZ)
- && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ }
+ if ((ct & TCG_CT_CONST_WSZ) && sval == (type == TCG_TYPE_I32 ? 32 : 64)) {
return 1;
}
return 0;
@@ -669,31 +718,35 @@ enum {
CR_SO
};
-static const uint32_t tcg_to_bc[] = {
- [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
- [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
- [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
- [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
- [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
- [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
- [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
- [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
- [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
- [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
+static const uint32_t tcg_to_bc[16] = {
+ [TCG_COND_EQ] = BC | BI(0, CR_EQ) | BO_COND_TRUE,
+ [TCG_COND_NE] = BC | BI(0, CR_EQ) | BO_COND_FALSE,
+ [TCG_COND_TSTEQ] = BC | BI(0, CR_EQ) | BO_COND_TRUE,
+ [TCG_COND_TSTNE] = BC | BI(0, CR_EQ) | BO_COND_FALSE,
+ [TCG_COND_LT] = BC | BI(0, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GE] = BC | BI(0, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LE] = BC | BI(0, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GT] = BC | BI(0, CR_GT) | BO_COND_TRUE,
+ [TCG_COND_LTU] = BC | BI(0, CR_LT) | BO_COND_TRUE,
+ [TCG_COND_GEU] = BC | BI(0, CR_LT) | BO_COND_FALSE,
+ [TCG_COND_LEU] = BC | BI(0, CR_GT) | BO_COND_FALSE,
+ [TCG_COND_GTU] = BC | BI(0, CR_GT) | BO_COND_TRUE,
};
/* The low bit here is set if the RA and RB fields must be inverted. */
-static const uint32_t tcg_to_isel[] = {
- [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
- [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
- [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
- [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
- [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
- [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
- [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
- [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
- [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
- [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
+static const uint32_t tcg_to_isel[16] = {
+ [TCG_COND_EQ] = ISEL | BC_(0, CR_EQ),
+ [TCG_COND_NE] = ISEL | BC_(0, CR_EQ) | 1,
+ [TCG_COND_TSTEQ] = ISEL | BC_(0, CR_EQ),
+ [TCG_COND_TSTNE] = ISEL | BC_(0, CR_EQ) | 1,
+ [TCG_COND_LT] = ISEL | BC_(0, CR_LT),
+ [TCG_COND_GE] = ISEL | BC_(0, CR_LT) | 1,
+ [TCG_COND_LE] = ISEL | BC_(0, CR_GT) | 1,
+ [TCG_COND_GT] = ISEL | BC_(0, CR_GT),
+ [TCG_COND_LTU] = ISEL | BC_(0, CR_LT),
+ [TCG_COND_GEU] = ISEL | BC_(0, CR_LT) | 1,
+ [TCG_COND_LEU] = ISEL | BC_(0, CR_GT) | 1,
+ [TCG_COND_GTU] = ISEL | BC_(0, CR_GT),
};
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
@@ -838,19 +891,31 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
return true;
}
-static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
- int sh, int mb)
+static void tcg_out_rld_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
+ int sh, int mb, bool rc)
{
tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
- tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
+ tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb | rc);
+}
+
+static void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
+ int sh, int mb)
+{
+ tcg_out_rld_rc(s, op, ra, rs, sh, mb, false);
+}
+
+static void tcg_out_rlw_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs,
+ int sh, int mb, int me, bool rc)
+{
+ tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me) | rc);
}
-static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
- int sh, int mb, int me)
+static void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
+ int sh, int mb, int me)
{
- tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
+ tcg_out_rlw_rc(s, op, ra, rs, sh, mb, me, false);
}
static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
@@ -1668,6 +1733,50 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
return false;
}
+/*
+ * Set dest non-zero if and only if (arg1 & arg2) is non-zero.
+ * If RC, then also set RC0.
+ */
+static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2,
+ bool const_arg2, TCGType type, bool rc)
+{
+ int mb, me;
+
+ if (!const_arg2) {
+ tcg_out32(s, AND | SAB(arg1, dest, arg2) | rc);
+ return;
+ }
+
+ if (type == TCG_TYPE_I32) {
+ arg2 = (uint32_t)arg2;
+ } else if (arg2 == (uint32_t)arg2) {
+ type = TCG_TYPE_I32;
+ }
+
+ if ((arg2 & ~0xffff) == 0) {
+ tcg_out32(s, ANDI | SAI(arg1, dest, arg2));
+ return;
+ }
+ if ((arg2 & ~0xffff0000ull) == 0) {
+ tcg_out32(s, ANDIS | SAI(arg1, dest, arg2 >> 16));
+ return;
+ }
+ if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) {
+ if (mask_operand(arg2, &mb, &me)) {
+ tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc);
+ return;
+ }
+ } else {
+ int sh = clz64(arg2);
+ if (mask64_operand(arg2 << sh, &mb, &me)) {
+ tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc);
+ return;
+ }
+ }
+ /* Constraints should satisfy this. */
+ g_assert_not_reached();
+}
+
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
int const_arg2, int cr, TCGType type)
{
@@ -1676,7 +1785,10 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
- /* Simplify the comparisons below wrt CMPI. */
+ /*
+ * Simplify the comparisons below wrt CMPI.
+ * All of the tests are 16-bit, so a 32-bit sign extend always works.
+ */
if (type == TCG_TYPE_I32) {
arg2 = (int32_t)arg2;
}
@@ -1699,6 +1811,12 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
imm = 0;
break;
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ tcg_debug_assert(cr == 0);
+ tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, true);
+ return;
+
case TCG_COND_LT:
case TCG_COND_GE:
case TCG_COND_LE:
@@ -1826,7 +1944,7 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
if (have_isa_3_10) {
tcg_insn_unit bi, opc;
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
/* Re-use tcg_to_bc for BI and BO_COND_{TRUE,FALSE}. */
bi = tcg_to_bc[cond] & (0x1f << 16);
@@ -1879,7 +1997,7 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
if (have_isel) {
int isel, tab;
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
isel = tcg_to_isel[cond];
@@ -1909,6 +2027,16 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
tcg_out_setcond_ne0(s, type, arg0, arg1, neg);
break;
+ case TCG_COND_TSTEQ:
+ tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false);
+ tcg_out_setcond_eq0(s, type, arg0, TCG_REG_R0, neg);
+ break;
+
+ case TCG_COND_TSTNE:
+ tcg_out_test(s, TCG_REG_R0, arg1, arg2, const_arg2, type, false);
+ tcg_out_setcond_ne0(s, type, arg0, TCG_REG_R0, neg);
+ break;
+
case TCG_COND_LE:
case TCG_COND_LEU:
inv = true;
@@ -1945,22 +2073,28 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
}
}
-static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
+static void tcg_out_bc(TCGContext *s, TCGCond cond, int bd)
+{
+ tcg_out32(s, tcg_to_bc[cond] | bd);
+}
+
+static void tcg_out_bc_lab(TCGContext *s, TCGCond cond, TCGLabel *l)
{
+ int bd = 0;
if (l->has_value) {
- bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
+ bd = reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
} else {
tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
}
- tcg_out32(s, bc);
+ tcg_out_bc(s, cond, bd);
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
TCGArg arg1, TCGArg arg2, int const_arg2,
TCGLabel *l, TCGType type)
{
- tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
- tcg_out_bc(s, tcg_to_bc[cond], l);
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 0, type);
+ tcg_out_bc_lab(s, cond, l);
}
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
@@ -1973,7 +2107,7 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
return;
}
- tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
+ tcg_out_cmp(s, cond, c1, c2, const_c2, 0, type);
if (have_isel) {
int isel = tcg_to_isel[cond];
@@ -2002,7 +2136,7 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
}
}
/* Branch forward over one insn */
- tcg_out32(s, tcg_to_bc[cond] | 8);
+ tcg_out_bc(s, cond, 8);
if (v2 == 0) {
tcg_out_movi(s, type, dest, 0);
} else {
@@ -2017,17 +2151,17 @@ static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
tcg_out32(s, opc | RA(a0) | RS(a1));
} else {
- tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
+ tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 0, type);
/* Note that the only other valid constant for a2 is 0. */
if (have_isel) {
tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
} else if (!const_a2 && a0 == a2) {
- tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
+ tcg_out_bc(s, TCG_COND_EQ, 8);
tcg_out32(s, opc | RA(a0) | RS(a1));
} else {
tcg_out32(s, opc | RA(a0) | RS(a1));
- tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
+ tcg_out_bc(s, TCG_COND_NE, 8);
if (const_a2) {
tcg_out_movi(s, type, a0, 0);
} else {
@@ -2072,7 +2206,22 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
do_equality:
tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
- tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
+ tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
+ break;
+
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ if (blconst) {
+ tcg_out_andi32(s, TCG_REG_R0, al, bl);
+ } else {
+ tcg_out32(s, AND | SAB(al, TCG_REG_R0, bl));
+ }
+ if (bhconst) {
+ tcg_out_andi32(s, TCG_REG_TMP1, ah, bh);
+ } else {
+ tcg_out32(s, AND | SAB(ah, TCG_REG_TMP1, bh));
+ }
+ tcg_out32(s, OR | SAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_TMP1) | 1);
break;
case TCG_COND_LT:
@@ -2090,8 +2239,8 @@ static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
- tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
- tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
+ tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
+ tcg_out32(s, CROR | BT(0, CR_EQ) | BA(6, bit1) | BB(0, CR_EQ));
break;
default:
@@ -2103,15 +2252,15 @@ static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
const int *const_args)
{
tcg_out_cmp2(s, args + 1, const_args + 1);
- tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
- tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
+ tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0));
+ tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31);
}
-static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
- const int *const_args)
+static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
+ const int *const_args)
{
tcg_out_cmp2(s, args, const_args);
- tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
+ tcg_out_bc_lab(s, TCG_COND_EQ, arg_label(args[5]));
}
static void tcg_out_mb(TCGContext *s, TCGArg a0)
@@ -2435,17 +2584,17 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2,
0, 6, TCG_TYPE_I32);
- /* Combine comparisons into cr7. */
- tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
+ /* Combine comparisons into cr0. */
+ tcg_out32(s, CRAND | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
} else {
- /* Full comparison into cr7. */
+ /* Full comparison into cr0. */
tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
- 0, 7, addr_type);
+ 0, 0, addr_type);
}
/* Load a pointer into the current opcode w/conditional branch-link. */
ldst->label_ptr[0] = s->code_ptr;
- tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
+ tcg_out_bc(s, TCG_COND_NE, LK);
h->base = TCG_REG_TMP1;
} else {
@@ -3979,8 +4128,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_sar_i32:
case INDEX_op_rotl_i32:
case INDEX_op_rotr_i32:
- case INDEX_op_setcond_i32:
- case INDEX_op_negsetcond_i32:
case INDEX_op_and_i64:
case INDEX_op_andc_i64:
case INDEX_op_shl_i64:
@@ -3988,8 +4135,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_sar_i64:
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i64:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i64:
return C_O1_I2(r, r, ri);
case INDEX_op_mul_i32:
@@ -4033,11 +4178,16 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_brcond_i32:
case INDEX_op_brcond_i64:
- return C_O0_I2(r, ri);
-
+ return C_O0_I2(r, rC);
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ case INDEX_op_negsetcond_i32:
+ case INDEX_op_negsetcond_i64:
+ return C_O1_I2(r, r, rC);
case INDEX_op_movcond_i32:
case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, ri, rZ, rZ);
+ return C_O1_I4(r, r, rC, rZ, rZ);
+
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
return C_O1_I2(r, 0, rZ);
diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h
index 5295e4f..04a7aba 100644
--- a/tcg/ppc/tcg-target.h
+++ b/tcg/ppc/tcg-target.h
@@ -143,6 +143,8 @@ typedef enum {
#define TCG_TARGET_HAS_qemu_ldst_i128 \
(TCG_TARGET_REG_BITS == 64 && have_isa_2_07)
+#define TCG_TARGET_HAS_tst 1
+
/*
* While technically Altivec could support V64, it has no 64-bit store
* instruction and substituting two 32-bit stores makes the generated
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 34e10e7..6393630 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -145,7 +145,8 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define sextreg sextract64
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index a4edc3d..2c1b680 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -158,6 +158,8 @@ extern bool have_zbb;
#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 0
+
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_NEED_LDST_LABELS
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index 9a42037..f75955e 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -15,7 +15,7 @@
C_O0_I1(r)
C_O0_I2(r, r)
C_O0_I2(r, ri)
-C_O0_I2(r, rA)
+C_O0_I2(r, rC)
C_O0_I2(v, r)
C_O0_I3(o, m, r)
C_O1_I1(r, r)
@@ -27,7 +27,7 @@ C_O1_I2(r, 0, rI)
C_O1_I2(r, 0, rJ)
C_O1_I2(r, r, r)
C_O1_I2(r, r, ri)
-C_O1_I2(r, r, rA)
+C_O1_I2(r, r, rC)
C_O1_I2(r, r, rI)
C_O1_I2(r, r, rJ)
C_O1_I2(r, r, rK)
@@ -39,10 +39,10 @@ C_O1_I2(v, v, r)
C_O1_I2(v, v, v)
C_O1_I3(v, v, v, v)
C_O1_I4(r, r, ri, rI, r)
-C_O1_I4(r, r, rA, rI, r)
+C_O1_I4(r, r, rC, rI, r)
C_O2_I1(o, m, r)
C_O2_I2(o, m, 0, r)
C_O2_I2(o, m, r, r)
C_O2_I3(o, m, 0, 1, r)
C_N1_O1_I4(r, r, 0, 1, ri, r)
-C_N1_O1_I4(r, r, 0, 1, rA, r)
+C_N1_O1_I4(r, r, 0, 1, rJU, r)
diff --git a/tcg/s390x/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h
index 25675b4..745f6c0d 100644
--- a/tcg/s390x/tcg-target-con-str.h
+++ b/tcg/s390x/tcg-target-con-str.h
@@ -16,10 +16,11 @@ REGS('o', 0xaaaa) /* odd numbered general regs */
* Define constraint letters for constants:
* CONST(letter, TCG_CT_CONST_* bit set)
*/
-CONST('A', TCG_CT_CONST_S33)
+CONST('C', TCG_CT_CONST_CMP)
CONST('I', TCG_CT_CONST_S16)
CONST('J', TCG_CT_CONST_S32)
CONST('K', TCG_CT_CONST_P32)
CONST('N', TCG_CT_CONST_INV)
CONST('R', TCG_CT_CONST_INVRISBG)
+CONST('U', TCG_CT_CONST_U32)
CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index 7f6b84a..ad58732 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -30,11 +30,12 @@
#define TCG_CT_CONST_S16 (1 << 8)
#define TCG_CT_CONST_S32 (1 << 9)
-#define TCG_CT_CONST_S33 (1 << 10)
+#define TCG_CT_CONST_U32 (1 << 10)
#define TCG_CT_CONST_ZERO (1 << 11)
#define TCG_CT_CONST_P32 (1 << 12)
#define TCG_CT_CONST_INV (1 << 13)
#define TCG_CT_CONST_INVRISBG (1 << 14)
+#define TCG_CT_CONST_CMP (1 << 15)
#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
@@ -111,6 +112,9 @@ typedef enum S390Opcode {
RI_OILH = 0xa50a,
RI_OILL = 0xa50b,
RI_TMLL = 0xa701,
+ RI_TMLH = 0xa700,
+ RI_TMHL = 0xa703,
+ RI_TMHH = 0xa702,
RIEb_CGRJ = 0xec64,
RIEb_CLGRJ = 0xec65,
@@ -403,10 +407,15 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define S390_CC_NEVER 0
#define S390_CC_ALWAYS 15
+#define S390_TM_EQ 8 /* CC == 0 */
+#define S390_TM_NE 7 /* CC in {1,2,3} */
+
/* Condition codes that result from a COMPARE and COMPARE LOGICAL. */
-static const uint8_t tcg_cond_to_s390_cond[] = {
+static const uint8_t tcg_cond_to_s390_cond[16] = {
[TCG_COND_EQ] = S390_CC_EQ,
[TCG_COND_NE] = S390_CC_NE,
+ [TCG_COND_TSTEQ] = S390_CC_EQ,
+ [TCG_COND_TSTNE] = S390_CC_NE,
[TCG_COND_LT] = S390_CC_LT,
[TCG_COND_LE] = S390_CC_LE,
[TCG_COND_GT] = S390_CC_GT,
@@ -420,9 +429,11 @@ static const uint8_t tcg_cond_to_s390_cond[] = {
/* Condition codes that result from a LOAD AND TEST. Here, we have no
unsigned instruction variation, however since the test is vs zero we
can re-map the outcomes appropriately. */
-static const uint8_t tcg_cond_to_ltr_cond[] = {
+static const uint8_t tcg_cond_to_ltr_cond[16] = {
[TCG_COND_EQ] = S390_CC_EQ,
[TCG_COND_NE] = S390_CC_NE,
+ [TCG_COND_TSTEQ] = S390_CC_ALWAYS,
+ [TCG_COND_TSTNE] = S390_CC_NEVER,
[TCG_COND_LT] = S390_CC_LT,
[TCG_COND_LE] = S390_CC_LE,
[TCG_COND_GT] = S390_CC_GT,
@@ -538,42 +549,74 @@ static bool risbg_mask(uint64_t c)
}
/* Test if a constant matches the constraint. */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
+ uint64_t uval = val;
+
if (ct & TCG_CT_CONST) {
- return 1;
+ return true;
}
-
if (type == TCG_TYPE_I32) {
+ uval = (uint32_t)val;
val = (int32_t)val;
}
- /* The following are mutually exclusive. */
- if (ct & TCG_CT_CONST_S16) {
- return val == (int16_t)val;
- } else if (ct & TCG_CT_CONST_S32) {
- return val == (int32_t)val;
- } else if (ct & TCG_CT_CONST_S33) {
- return val >= -0xffffffffll && val <= 0xffffffffll;
- } else if (ct & TCG_CT_CONST_ZERO) {
- return val == 0;
+ if (ct & TCG_CT_CONST_CMP) {
+ switch (cond) {
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ ct |= TCG_CT_CONST_S32 | TCG_CT_CONST_U32; /* CGFI or CLGFI */
+ break;
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ ct |= TCG_CT_CONST_S32; /* CGFI */
+ break;
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ ct |= TCG_CT_CONST_U32; /* CLGFI */
+ break;
+ case TCG_COND_TSTNE:
+ case TCG_COND_TSTEQ:
+ if (is_const_p16(uval) >= 0) {
+ return true; /* TMxx */
+ }
+ if (risbg_mask(uval)) {
+ return true; /* RISBG */
+ }
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ }
+
+ if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
+ return true;
+ }
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+ return true;
}
if (ct & TCG_CT_CONST_INV) {
val = ~val;
}
- /*
- * Note that is_const_p16 is a subset of is_const_p32,
- * so we don't need both constraints.
- */
if ((ct & TCG_CT_CONST_P32) && is_const_p32(val) >= 0) {
return true;
}
if ((ct & TCG_CT_CONST_INVRISBG) && risbg_mask(~val)) {
return true;
}
-
- return 0;
+ return false;
}
/* Emit instructions according to the given instruction format. */
@@ -843,6 +886,9 @@ static const S390Opcode oi_insns[4] = {
static const S390Opcode lif_insns[2] = {
RIL_LLILF, RIL_LLIHF,
};
+static const S390Opcode tm_insns[4] = {
+ RI_TMLL, RI_TMLH, RI_TMHL, RI_TMHH
+};
/* load a register with an immediate value */
static void tcg_out_movi(TCGContext *s, TCGType type,
@@ -1203,6 +1249,36 @@ static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
TCGCond inv_c = tcg_invert_cond(c);
S390Opcode op;
+ if (is_tst_cond(c)) {
+ tcg_debug_assert(!need_carry);
+
+ if (!c2const) {
+ if (type == TCG_TYPE_I32) {
+ tcg_out_insn(s, RRFa, NRK, TCG_REG_R0, r1, c2);
+ } else {
+ tcg_out_insn(s, RRFa, NGRK, TCG_REG_R0, r1, c2);
+ }
+ goto exit;
+ }
+
+ if (type == TCG_TYPE_I32) {
+ c2 = (uint32_t)c2;
+ }
+
+ int i = is_const_p16(c2);
+ if (i >= 0) {
+ tcg_out_insn_RI(s, tm_insns[i], r1, c2 >> (i * 16));
+ *inv_cc = c == TCG_COND_TSTEQ ? S390_TM_NE : S390_TM_EQ;
+ return *inv_cc ^ 15;
+ }
+
+ if (risbg_mask(c2)) {
+ tgen_andi_risbg(s, TCG_REG_R0, r1, c2);
+ goto exit;
+ }
+ g_assert_not_reached();
+ }
+
if (c2const) {
if (c2 == 0) {
if (!(is_unsigned && need_carry)) {
@@ -1228,22 +1304,34 @@ static int tgen_cmp2(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
goto exit;
}
- /*
- * Constraints are for a signed 33-bit operand, which is a
- * convenient superset of this signed/unsigned test.
- */
- if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2 : (TCGArg)(int32_t)c2)) {
- op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
- tcg_out_insn_RIL(s, op, r1, c2);
- goto exit;
+ /* Should match TCG_CT_CONST_CMP. */
+ switch (c) {
+ case TCG_COND_LT:
+ case TCG_COND_GE:
+ case TCG_COND_LE:
+ case TCG_COND_GT:
+ tcg_debug_assert(c2 == (int32_t)c2);
+ op = RIL_CGFI;
+ break;
+ case TCG_COND_EQ:
+ case TCG_COND_NE:
+ if (c2 == (int32_t)c2) {
+ op = RIL_CGFI;
+ break;
+ }
+ /* fall through */
+ case TCG_COND_LTU:
+ case TCG_COND_GEU:
+ case TCG_COND_LEU:
+ case TCG_COND_GTU:
+ tcg_debug_assert(c2 == (uint32_t)c2);
+ op = RIL_CLGFI;
+ break;
+ default:
+ g_assert_not_reached();
}
-
- /* Load everything else into a register. */
- tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, c2);
- c2 = TCG_TMP0;
- }
-
- if (type == TCG_TYPE_I32) {
+ tcg_out_insn_RIL(s, op, r1, c2);
+ } else if (type == TCG_TYPE_I32) {
op = (is_unsigned ? RR_CLR : RR_CR);
tcg_out_insn_RR(s, op, r1, c2);
} else {
@@ -1516,46 +1604,49 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
int cc;
- bool is_unsigned = is_unsigned_cond(c);
- bool in_range;
- S390Opcode opc;
- cc = tcg_cond_to_s390_cond[c];
+ if (!is_tst_cond(c)) {
+ bool is_unsigned = is_unsigned_cond(c);
+ bool in_range;
+ S390Opcode opc;
- if (!c2const) {
- opc = (type == TCG_TYPE_I32
- ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
- : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
- tgen_compare_branch(s, opc, cc, r1, c2, l);
- return;
- }
+ cc = tcg_cond_to_s390_cond[c];
- /*
- * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
- * If the immediate we've been given does not fit that range, we'll
- * fall back to separate compare and branch instructions using the
- * larger comparison range afforded by COMPARE IMMEDIATE.
- */
- if (type == TCG_TYPE_I32) {
- if (is_unsigned) {
- opc = RIEc_CLIJ;
- in_range = (uint32_t)c2 == (uint8_t)c2;
- } else {
- opc = RIEc_CIJ;
- in_range = (int32_t)c2 == (int8_t)c2;
+ if (!c2const) {
+ opc = (type == TCG_TYPE_I32
+ ? (is_unsigned ? RIEb_CLRJ : RIEb_CRJ)
+ : (is_unsigned ? RIEb_CLGRJ : RIEb_CGRJ));
+ tgen_compare_branch(s, opc, cc, r1, c2, l);
+ return;
}
- } else {
- if (is_unsigned) {
- opc = RIEc_CLGIJ;
- in_range = (uint64_t)c2 == (uint8_t)c2;
+
+ /*
+ * COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
+ * If the immediate we've been given does not fit that range, we'll
+ * fall back to separate compare and branch instructions using the
+ * larger comparison range afforded by COMPARE IMMEDIATE.
+ */
+ if (type == TCG_TYPE_I32) {
+ if (is_unsigned) {
+ opc = RIEc_CLIJ;
+ in_range = (uint32_t)c2 == (uint8_t)c2;
+ } else {
+ opc = RIEc_CIJ;
+ in_range = (int32_t)c2 == (int8_t)c2;
+ }
} else {
- opc = RIEc_CGIJ;
- in_range = (int64_t)c2 == (int8_t)c2;
+ if (is_unsigned) {
+ opc = RIEc_CLGIJ;
+ in_range = (uint64_t)c2 == (uint8_t)c2;
+ } else {
+ opc = RIEc_CGIJ;
+ in_range = (int64_t)c2 == (int8_t)c2;
+ }
+ }
+ if (in_range) {
+ tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
+ return;
}
- }
- if (in_range) {
- tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
- return;
}
cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
@@ -1834,11 +1925,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
ldst->oi = oi;
ldst->addrlo_reg = addr_reg;
- /* We are expecting a_bits to max out at 7, much lower than TMLL. */
tcg_debug_assert(a_mask <= 0xffff);
tcg_out_insn(s, RI, TMLL, addr_reg, a_mask);
- tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
+ tcg_out16(s, RI_BRC | (S390_TM_NE << 4));
ldst->label_ptr[0] = s->code_ptr++;
}
@@ -1919,7 +2009,7 @@ static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
l2 = gen_new_label();
tcg_out_insn(s, RI, TMLL, addr_reg, 15);
- tgen_branch(s, 7, l1); /* CC in {1,2,3} */
+ tgen_branch(s, S390_TM_NE, l1);
}
tcg_debug_assert(!need_bswap);
@@ -3136,7 +3226,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
return C_O1_I2(r, r, ri);
case INDEX_op_setcond_i64:
case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, r, rA);
+ return C_O1_I2(r, r, rC);
case INDEX_op_clz_i64:
return C_O1_I2(r, r, rI);
@@ -3186,7 +3276,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_brcond_i32:
return C_O0_I2(r, ri);
case INDEX_op_brcond_i64:
- return C_O0_I2(r, rA);
+ return C_O0_I2(r, rC);
case INDEX_op_bswap16_i32:
case INDEX_op_bswap16_i64:
@@ -3239,7 +3329,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_movcond_i32:
return C_O1_I4(r, r, ri, rI, r);
case INDEX_op_movcond_i64:
- return C_O1_I4(r, r, rA, rI, r);
+ return C_O1_I4(r, r, rC, rI, r);
case INDEX_op_div2_i32:
case INDEX_op_div2_i64:
@@ -3258,7 +3348,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_add2_i64:
case INDEX_op_sub2_i64:
- return C_N1_O1_I4(r, r, 0, 1, rA, r);
+ return C_N1_O1_I4(r, r, 0, 1, rJU, r);
case INDEX_op_st_vec:
return C_O0_I2(v, r);
diff --git a/tcg/s390x/tcg-target.h b/tcg/s390x/tcg-target.h
index e69b0d2..ae448c3 100644
--- a/tcg/s390x/tcg-target.h
+++ b/tcg/s390x/tcg-target.h
@@ -138,6 +138,8 @@ extern uint64_t s390_facilities[3];
#define TCG_TARGET_HAS_qemu_ldst_i128 1
+#define TCG_TARGET_HAS_tst 1
+
#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR)
#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR)
#define TCG_TARGET_HAS_v256 0
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index a91defd..176c987 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -322,7 +322,8 @@ static bool patch_reloc(tcg_insn_unit *src_rw, int type,
}
/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
if (ct & TCG_CT_CONST) {
return 1;
@@ -606,9 +607,11 @@ static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
uns ? ARITH_UDIV : ARITH_SDIV);
}
-static const uint8_t tcg_cond_to_bcond[] = {
+static const uint8_t tcg_cond_to_bcond[16] = {
[TCG_COND_EQ] = COND_E,
[TCG_COND_NE] = COND_NE,
+ [TCG_COND_TSTEQ] = COND_E,
+ [TCG_COND_TSTNE] = COND_NE,
[TCG_COND_LT] = COND_L,
[TCG_COND_GE] = COND_GE,
[TCG_COND_LE] = COND_LE,
@@ -619,7 +622,7 @@ static const uint8_t tcg_cond_to_bcond[] = {
[TCG_COND_GTU] = COND_GU,
};
-static const uint8_t tcg_cond_to_rcond[] = {
+static const uint8_t tcg_cond_to_rcond[16] = {
[TCG_COND_EQ] = RCOND_Z,
[TCG_COND_NE] = RCOND_NZ,
[TCG_COND_LT] = RCOND_LZ,
@@ -645,15 +648,17 @@ static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
tcg_out_bpcc0(s, scond, flags, off19);
}
-static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
+static void tcg_out_cmp(TCGContext *s, TCGCond cond,
+ TCGReg c1, int32_t c2, int c2const)
{
- tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
+ tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const,
+ is_tst_cond(cond) ? ARITH_ANDCC : ARITH_SUBCC);
}
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
int32_t arg2, int const_arg2, TCGLabel *l)
{
- tcg_out_cmp(s, arg1, arg2, const_arg2);
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
tcg_out_nop(s);
}
@@ -670,7 +675,7 @@ static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
TCGReg c1, int32_t c2, int c2const,
int32_t v1, int v1const)
{
- tcg_out_cmp(s, c1, c2, c2const);
+ tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}
@@ -678,7 +683,8 @@ static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
int32_t arg2, int const_arg2, TCGLabel *l)
{
/* For 64-bit signed comparisons vs zero, we can avoid the compare. */
- if (arg2 == 0 && !is_unsigned_cond(cond)) {
+ int rcond = tcg_cond_to_rcond[cond];
+ if (arg2 == 0 && rcond) {
int off16 = 0;
if (l->has_value) {
@@ -687,19 +693,18 @@ static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
}
tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
- | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
+ | INSN_COND(rcond) | off16);
} else {
- tcg_out_cmp(s, arg1, arg2, const_arg2);
+ tcg_out_cmp(s, cond, arg1, arg2, const_arg2);
tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
}
tcg_out_nop(s);
}
-static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
+static void tcg_out_movr(TCGContext *s, int rcond, TCGReg ret, TCGReg c1,
int32_t v1, int v1const)
{
- tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
- | (tcg_cond_to_rcond[cond] << 10)
+ tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1) | (rcond << 10)
| (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}
@@ -710,11 +715,11 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
/* For 64-bit signed comparisons vs zero, we can avoid the compare.
Note that the immediate range is one bit smaller, so we must check
for that as well. */
- if (c2 == 0 && !is_unsigned_cond(cond)
- && (!v1const || check_fit_i32(v1, 10))) {
- tcg_out_movr(s, cond, ret, c1, v1, v1const);
+ int rcond = tcg_cond_to_rcond[cond];
+ if (c2 == 0 && rcond && (!v1const || check_fit_i32(v1, 10))) {
+ tcg_out_movr(s, rcond, ret, c1, v1, v1const);
} else {
- tcg_out_cmp(s, c1, c2, c2const);
+ tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
}
}
@@ -742,6 +747,15 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
break;
+ case TCG_COND_TSTEQ:
+ case TCG_COND_TSTNE:
+ /* Transform to inequality vs zero. */
+ tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_AND);
+ c1 = TCG_REG_G0;
+ c2 = TCG_REG_T1, c2const = 0;
+ cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
+ break;
+
case TCG_COND_GTU:
case TCG_COND_LEU:
/* If we don't need to load a constant into a register, we can
@@ -758,13 +772,13 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
/* FALLTHRU */
default:
- tcg_out_cmp(s, c1, c2, c2const);
+ tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_movi_s13(s, ret, 0);
tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
return;
}
- tcg_out_cmp(s, c1, c2, c2const);
+ tcg_out_cmp(s, cond, c1, c2, c2const);
if (cond == TCG_COND_LTU) {
if (neg) {
/* 0 - 0 - C = -C = (C ? -1 : 0) */
@@ -787,6 +801,8 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
TCGReg c1, int32_t c2, int c2const, bool neg)
{
+ int rcond;
+
if (use_vis3_instructions && !neg) {
switch (cond) {
case TCG_COND_NE:
@@ -796,7 +812,7 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
c2 = c1, c2const = 0, c1 = TCG_REG_G0;
/* FALLTHRU */
case TCG_COND_LTU:
- tcg_out_cmp(s, c1, c2, c2const);
+ tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
return;
default:
@@ -806,11 +822,12 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
/* For 64-bit signed comparisons vs zero, we can avoid the compare
if the input does not overlap the output. */
- if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
+ rcond = tcg_cond_to_rcond[cond];
+ if (c2 == 0 && rcond && c1 != ret) {
tcg_out_movi_s13(s, ret, 0);
- tcg_out_movr(s, cond, ret, c1, neg ? -1 : 1, 1);
+ tcg_out_movr(s, rcond, ret, c1, neg ? -1 : 1, 1);
} else {
- tcg_out_cmp(s, c1, c2, c2const);
+ tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_movi_s13(s, ret, 0);
tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
}
@@ -1098,7 +1115,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
tcg_out_movi_s32(s, TCG_REG_T3, compare_mask);
tcg_out_arith(s, TCG_REG_T3, addr_reg, TCG_REG_T3, ARITH_AND);
}
- tcg_out_cmp(s, TCG_REG_T2, TCG_REG_T3, 0);
+ tcg_out_cmp(s, TCG_COND_NE, TCG_REG_T2, TCG_REG_T3, 0);
ldst = new_ldst_label(s);
ldst->is_ld = is_ld;
diff --git a/tcg/sparc64/tcg-target.h b/tcg/sparc64/tcg-target.h
index f8cf145..a18906a 100644
--- a/tcg/sparc64/tcg-target.h
+++ b/tcg/sparc64/tcg-target.h
@@ -149,6 +149,8 @@ extern bool use_vis3_instructions;
#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 1
+
#define TCG_AREG0 TCG_REG_I0
#define TCG_TARGET_DEFAULT_MO (0)
diff --git a/tcg/tcg-internal.h b/tcg/tcg-internal.h
index 6c9d9e4..9b0d982 100644
--- a/tcg/tcg-internal.h
+++ b/tcg/tcg-internal.h
@@ -83,6 +83,8 @@ static inline TCGv_i64 TCGV128_HIGH(TCGv_i128 t)
bool tcg_target_has_memory_bswap(MemOp memop);
+TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind);
+
/*
* Locate or create a read-only temporary that is a constant.
* This kind of temporary need not be freed, but for convenience
diff --git a/tcg/tcg.c b/tcg/tcg.c
index eeff4c1..d667023 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -173,7 +173,8 @@ static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
const TCGHelperInfo *info);
static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot);
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece);
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece);
#ifdef TCG_TARGET_NEED_LDST_LABELS
static int tcg_out_ldst_finalize(TCGContext *s);
#endif
@@ -1655,7 +1656,7 @@ TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t off, const char *name)
return temp_tcgv_ptr(ts);
}
-static TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
+TCGTemp *tcg_temp_new_internal(TCGType type, TCGTempKind kind)
{
TCGContext *s = tcg_ctx;
TCGTemp *ts;
@@ -2482,7 +2483,9 @@ static const char * const cond_name[] =
[TCG_COND_LTU] = "ltu",
[TCG_COND_GEU] = "geu",
[TCG_COND_LEU] = "leu",
- [TCG_COND_GTU] = "gtu"
+ [TCG_COND_GTU] = "gtu",
+ [TCG_COND_TSTEQ] = "tsteq",
+ [TCG_COND_TSTNE] = "tstne",
};
static const char * const ldst_name[(MO_BSWAP | MO_SSIZE) + 1] =
@@ -4784,6 +4787,7 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
TCGTemp *ts;
TCGArg new_args[TCG_MAX_OP_ARGS];
int const_args[TCG_MAX_OP_ARGS];
+ TCGCond op_cond;
nb_oargs = def->nb_oargs;
nb_iargs = def->nb_iargs;
@@ -4796,6 +4800,33 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
i_allocated_regs = s->reserved_regs;
o_allocated_regs = s->reserved_regs;
+ switch (op->opc) {
+ case INDEX_op_brcond_i32:
+ case INDEX_op_brcond_i64:
+ op_cond = op->args[2];
+ break;
+ case INDEX_op_setcond_i32:
+ case INDEX_op_setcond_i64:
+ case INDEX_op_negsetcond_i32:
+ case INDEX_op_negsetcond_i64:
+ case INDEX_op_cmp_vec:
+ op_cond = op->args[3];
+ break;
+ case INDEX_op_brcond2_i32:
+ op_cond = op->args[4];
+ break;
+ case INDEX_op_movcond_i32:
+ case INDEX_op_movcond_i64:
+ case INDEX_op_setcond2_i32:
+ case INDEX_op_cmpsel_vec:
+ op_cond = op->args[5];
+ break;
+ default:
+ /* No condition within opcode. */
+ op_cond = TCG_COND_ALWAYS;
+ break;
+ }
+
/* satisfy input constraints */
for (k = 0; k < nb_iargs; k++) {
TCGRegSet i_preferred_regs, i_required_regs;
@@ -4809,7 +4840,8 @@ static void tcg_reg_alloc_op(TCGContext *s, const TCGOp *op)
ts = arg_temp(arg);
if (ts->val_type == TEMP_VAL_CONST
- && tcg_target_const_match(ts->val, ts->type, arg_ct->ct, TCGOP_VECE(op))) {
+ && tcg_target_const_match(ts->val, arg_ct->ct, ts->type,
+ op_cond, TCGOP_VECE(op))) {
/* constant is OK for instruction */
const_args[i] = 1;
new_args[i] = ts->val;
diff --git a/tcg/tci.c b/tcg/tci.c
index 3cc851b..39adcb7 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -228,6 +228,12 @@ static bool tci_compare32(uint32_t u0, uint32_t u1, TCGCond condition)
case TCG_COND_GTU:
result = (u0 > u1);
break;
+ case TCG_COND_TSTEQ:
+ result = (u0 & u1) == 0;
+ break;
+ case TCG_COND_TSTNE:
+ result = (u0 & u1) != 0;
+ break;
default:
g_assert_not_reached();
}
@@ -270,6 +276,12 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
case TCG_COND_GTU:
result = (u0 > u1);
break;
+ case TCG_COND_TSTEQ:
+ result = (u0 & u1) == 0;
+ break;
+ case TCG_COND_TSTNE:
+ result = (u0 & u1) != 0;
+ break;
default:
g_assert_not_reached();
}
@@ -1041,6 +1053,8 @@ static const char *str_c(TCGCond c)
[TCG_COND_GEU] = "geu",
[TCG_COND_LEU] = "leu",
[TCG_COND_GTU] = "gtu",
+ [TCG_COND_TSTEQ] = "tsteq",
+ [TCG_COND_TSTNE] = "tstne",
};
assert((unsigned)c < ARRAY_SIZE(cond));
diff --git a/tcg/tci/tcg-target.c.inc b/tcg/tci/tcg-target.c.inc
index 461f4b4..c740864 100644
--- a/tcg/tci/tcg-target.c.inc
+++ b/tcg/tci/tcg-target.c.inc
@@ -913,7 +913,8 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
}
/* Test if a constant matches the constraint. */
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
+static bool tcg_target_const_match(int64_t val, int ct,
+ TCGType type, TCGCond cond, int vece)
{
return ct & TCG_CT_CONST;
}
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 2a13816..a076f40 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -117,6 +117,8 @@
#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_tst 1
+
/* Number of registers available. */
#define TCG_TARGET_NB_REGS 16