Diffstat (limited to 'tcg/sparc64')
-rw-r--r--  tcg/sparc64/tcg-target-con-set.h |    9
-rw-r--r--  tcg/sparc64/tcg-target-has.h     |   65
-rw-r--r--  tcg/sparc64/tcg-target.c.inc     | 1301
3 files changed, 889 insertions, 486 deletions
diff --git a/tcg/sparc64/tcg-target-con-set.h b/tcg/sparc64/tcg-target-con-set.h
index 61f9fa3..1a57adc 100644
--- a/tcg/sparc64/tcg-target-con-set.h
+++ b/tcg/sparc64/tcg-target-con-set.h
@@ -11,10 +11,11 @@
*/
C_O0_I1(r)
C_O0_I2(rz, r)
-C_O0_I2(rz, rJ)
+C_O0_I2(r, rJ)
C_O1_I1(r, r)
C_O1_I2(r, r, r)
+C_O1_I2(r, r, rJ)
C_O1_I2(r, rz, rJ)
-C_O1_I4(r, rz, rJ, rI, 0)
-C_O2_I2(r, r, rz, rJ)
-C_O2_I4(r, r, rz, rz, rJ, rJ)
+C_O1_I2(r, rz, rz)
+C_O1_I4(r, r, rJ, rI, 0)
+C_O2_I2(r, r, r, r)
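/*
 * Editor's note on the constraint letters above (assuming the usual
 * sparc64 tcg-target-con-str.h definitions): 'r' is any general
 * register, 'J' a signed 13-bit immediate (plain arithmetic), 'I' a
 * signed 11-bit immediate (MOVCC), 'z' a register or the constant
 * zero (folded to %g0), and '0' matches operand 0.
 */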
diff --git a/tcg/sparc64/tcg-target-has.h b/tcg/sparc64/tcg-target-has.h
index 2f46df8..b29fd17 100644
--- a/tcg/sparc64/tcg-target-has.h
+++ b/tcg/sparc64/tcg-target-has.h
@@ -7,74 +7,9 @@
#ifndef TCG_TARGET_HAS_H
#define TCG_TARGET_HAS_H
-#if defined(__VIS__) && __VIS__ >= 0x300
-#define use_vis3_instructions 1
-#else
-extern bool use_vis3_instructions;
-#endif
-
/* optional instructions */
-#define TCG_TARGET_HAS_div_i32 1
-#define TCG_TARGET_HAS_rem_i32 0
-#define TCG_TARGET_HAS_rot_i32 0
-#define TCG_TARGET_HAS_ext8s_i32 0
-#define TCG_TARGET_HAS_ext16s_i32 0
-#define TCG_TARGET_HAS_ext8u_i32 0
-#define TCG_TARGET_HAS_ext16u_i32 0
-#define TCG_TARGET_HAS_bswap16_i32 0
-#define TCG_TARGET_HAS_bswap32_i32 0
-#define TCG_TARGET_HAS_not_i32 1
-#define TCG_TARGET_HAS_andc_i32 1
-#define TCG_TARGET_HAS_orc_i32 1
-#define TCG_TARGET_HAS_eqv_i32 0
-#define TCG_TARGET_HAS_nand_i32 0
-#define TCG_TARGET_HAS_nor_i32 0
-#define TCG_TARGET_HAS_clz_i32 0
-#define TCG_TARGET_HAS_ctz_i32 0
-#define TCG_TARGET_HAS_ctpop_i32 0
-#define TCG_TARGET_HAS_extract2_i32 0
-#define TCG_TARGET_HAS_negsetcond_i32 1
-#define TCG_TARGET_HAS_add2_i32 1
-#define TCG_TARGET_HAS_sub2_i32 1
-#define TCG_TARGET_HAS_mulu2_i32 1
-#define TCG_TARGET_HAS_muls2_i32 1
-#define TCG_TARGET_HAS_muluh_i32 0
-#define TCG_TARGET_HAS_mulsh_i32 0
-#define TCG_TARGET_HAS_qemu_st8_i32 0
-
#define TCG_TARGET_HAS_extr_i64_i32 0
-#define TCG_TARGET_HAS_div_i64 1
-#define TCG_TARGET_HAS_rem_i64 0
-#define TCG_TARGET_HAS_rot_i64 0
-#define TCG_TARGET_HAS_ext8s_i64 0
-#define TCG_TARGET_HAS_ext16s_i64 0
-#define TCG_TARGET_HAS_ext32s_i64 1
-#define TCG_TARGET_HAS_ext8u_i64 0
-#define TCG_TARGET_HAS_ext16u_i64 0
-#define TCG_TARGET_HAS_ext32u_i64 1
-#define TCG_TARGET_HAS_bswap16_i64 0
-#define TCG_TARGET_HAS_bswap32_i64 0
-#define TCG_TARGET_HAS_bswap64_i64 0
-#define TCG_TARGET_HAS_not_i64 1
-#define TCG_TARGET_HAS_andc_i64 1
-#define TCG_TARGET_HAS_orc_i64 1
-#define TCG_TARGET_HAS_eqv_i64 0
-#define TCG_TARGET_HAS_nand_i64 0
-#define TCG_TARGET_HAS_nor_i64 0
-#define TCG_TARGET_HAS_clz_i64 0
-#define TCG_TARGET_HAS_ctz_i64 0
-#define TCG_TARGET_HAS_ctpop_i64 0
-#define TCG_TARGET_HAS_extract2_i64 0
-#define TCG_TARGET_HAS_negsetcond_i64 1
-#define TCG_TARGET_HAS_add2_i64 1
-#define TCG_TARGET_HAS_sub2_i64 1
-#define TCG_TARGET_HAS_mulu2_i64 0
-#define TCG_TARGET_HAS_muls2_i64 0
-#define TCG_TARGET_HAS_muluh_i64 use_vis3_instructions
-#define TCG_TARGET_HAS_mulsh_i64 0
-
#define TCG_TARGET_HAS_qemu_ldst_i128 0
-
#define TCG_TARGET_HAS_tst 1
#define TCG_TARGET_extract_valid(type, ofs, len) \
diff --git a/tcg/sparc64/tcg-target.c.inc b/tcg/sparc64/tcg-target.c.inc
index 7c722f5..9e004fb 100644
--- a/tcg/sparc64/tcg-target.c.inc
+++ b/tcg/sparc64/tcg-target.c.inc
@@ -199,7 +199,9 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
+#define ARITH_ADDCCC (INSN_OP(2) | INSN_OP3(0x18))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
+#define ARITH_SUBCCC (INSN_OP(2) | INSN_OP3(0x1c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
@@ -208,9 +210,11 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
+#define ARITH_POPC (INSN_OP(2) | INSN_OP3(0x2e))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
+#define ARITH_ADDXCCC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x13))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
@@ -223,6 +227,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
+#define WRCCR (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(2))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
@@ -270,8 +275,11 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
-#ifndef use_vis3_instructions
-bool use_vis3_instructions;
+static bool use_popc_instructions;
+#if defined(__VIS__) && __VIS__ >= 0x300
+#define use_vis3_instructions 1
+#else
+static bool use_vis3_instructions;
#endif
static bool check_fit_i64(int64_t val, unsigned int bits)
@@ -366,7 +374,7 @@ static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
}
static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
- int32_t val2, int val2const, int op)
+ int32_t val2, int val2const, int op)
{
tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
| (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
@@ -596,21 +604,6 @@ static void tcg_out_sety(TCGContext *s, TCGReg rs)
tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}
-static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
- int32_t val2, int val2const, int uns)
-{
- /* Load Y with the sign/zero extension of RS1 to 64-bits. */
- if (uns) {
- tcg_out_sety(s, TCG_REG_G0);
- } else {
- tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
- tcg_out_sety(s, TCG_REG_T1);
- }
-
- tcg_out_arithc(s, rd, rs1, val2, val2const,
- uns ? ARITH_UDIV : ARITH_SDIV);
-}
-
static const uint8_t tcg_cond_to_bcond[16] = {
[TCG_COND_EQ] = COND_E,
[TCG_COND_NE] = COND_NE,
@@ -652,6 +645,12 @@ static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
tcg_out_bpcc0(s, scond, flags, off19);
}
+static void tcg_out_br(TCGContext *s, TCGLabel *l)
+{
+ tcg_out_bpcc(s, COND_A, BPCC_PT, l);
+ tcg_out_nop(s);
+}
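/*
 * Editor's note: the NOP fills the BPcc branch delay slot;
 * tcg_out_bpcc emits only the branch insn itself.
 */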
+
static void tcg_out_cmp(TCGContext *s, TCGCond cond,
TCGReg c1, int32_t c2, int c2const)
{
@@ -667,11 +666,10 @@ static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
tcg_out_nop(s);
}
-static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
+static void tcg_out_movcc(TCGContext *s, int scond, int cc, TCGReg ret,
int32_t v1, int v1const)
{
- tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
- | INSN_RS1(tcg_cond_to_bcond[cond])
+ tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret) | INSN_RS1(scond)
| (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}
@@ -680,7 +678,7 @@ static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
int32_t v1, int v1const)
{
tcg_out_cmp(s, cond, c1, c2, c2const);
- tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
+ tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_ICC, ret, v1, v1const);
}
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
@@ -724,12 +722,12 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
tcg_out_movr(s, rcond, ret, c1, v1, v1const);
} else {
tcg_out_cmp(s, cond, c1, c2, c2const);
- tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
+ tcg_out_movcc(s, tcg_cond_to_bcond[cond], MOVCC_XCC, ret, v1, v1const);
}
}
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const, bool neg)
+ TCGReg c1, int32_t c2, bool c2const, bool neg)
{
/* For 32-bit comparisons, we can play games with ADDC/SUBC. */
switch (cond) {
@@ -749,7 +747,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
}
c1 = TCG_REG_G0, c2const = 0;
cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
- break;
+ break;
case TCG_COND_TSTEQ:
case TCG_COND_TSTNE:
@@ -758,7 +756,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
c1 = TCG_REG_G0;
c2 = TCG_REG_T1, c2const = 0;
cond = (cond == TCG_COND_TSTEQ ? TCG_COND_GEU : TCG_COND_LTU);
- break;
+ break;
case TCG_COND_GTU:
case TCG_COND_LEU:
@@ -778,7 +776,8 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
default:
tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_movi_s13(s, ret, 0);
- tcg_out_movcc(s, cond, MOVCC_ICC, ret, neg ? -1 : 1, 1);
+ tcg_out_movcc(s, tcg_cond_to_bcond[cond],
+ MOVCC_ICC, ret, neg ? -1 : 1, 1);
return;
}
@@ -803,7 +802,7 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
}
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
- TCGReg c1, int32_t c2, int c2const, bool neg)
+ TCGReg c1, int32_t c2, bool c2const, bool neg)
{
int rcond;
@@ -833,78 +832,103 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
} else {
tcg_out_cmp(s, cond, c1, c2, c2const);
tcg_out_movi_s13(s, ret, 0);
- tcg_out_movcc(s, cond, MOVCC_XCC, ret, neg ? -1 : 1, 1);
+ tcg_out_movcc(s, tcg_cond_to_bcond[cond],
+ MOVCC_XCC, ret, neg ? -1 : 1, 1);
}
}
-static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
- TCGReg al, TCGReg ah, int32_t bl, int blconst,
- int32_t bh, int bhconst, int opl, int oph)
+static void tcg_out_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGArg arg2, bool const_arg2,
+ TCGLabel *l)
{
- TCGReg tmp = TCG_REG_T1;
-
- /* Note that the low parts are fully consumed before tmp is set. */
- if (rl != ah && (bhconst || rl != bh)) {
- tmp = rl;
+ if (type == TCG_TYPE_I32) {
+ tcg_out_brcond_i32(s, cond, arg1, arg2, const_arg2, l);
+ } else {
+ tcg_out_brcond_i64(s, cond, arg1, arg2, const_arg2, l);
}
+}
- tcg_out_arithc(s, tmp, al, bl, blconst, opl);
- tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
- tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
+static void tgen_brcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, TCGReg arg2, TCGLabel *l)
+{
+ tcg_out_brcond(s, type, cond, arg1, arg2, false, l);
}
-static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
- TCGReg al, TCGReg ah, int32_t bl, int blconst,
- int32_t bh, int bhconst, bool is_sub)
+static void tgen_brcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg arg1, tcg_target_long arg2, TCGLabel *l)
{
- TCGReg tmp = TCG_REG_T1;
+ tcg_out_brcond(s, type, cond, arg1, arg2, true, l);
+}
+
+static const TCGOutOpBrcond outop_brcond = {
+ .base.static_constraint = C_O0_I2(r, rJ),
+ .out_rr = tgen_brcond,
+ .out_ri = tgen_brcondi,
+};
- /* Note that the low parts are fully consumed before tmp is set. */
- if (rl != ah && (bhconst || rl != bh)) {
- tmp = rl;
+static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1,
+ TCGArg c2, bool c2const, bool neg)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_setcond_i32(s, cond, ret, c1, c2, c2const, neg);
+ } else {
+ tcg_out_setcond_i64(s, cond, ret, c1, c2, c2const, neg);
}
+}
- tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
+static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, false);
+}
- if (use_vis3_instructions && !is_sub) {
- /* Note that ADDXC doesn't accept immediates. */
- if (bhconst && bh != 0) {
- tcg_out_movi_s13(s, TCG_REG_T2, bh);
- bh = TCG_REG_T2;
- }
- tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
- } else if (bh == TCG_REG_G0) {
- /* If we have a zero, we can perform the operation in two insns,
- with the arithmetic first, and a conditional move into place. */
- if (rh == ah) {
- tcg_out_arithi(s, TCG_REG_T2, ah, 1,
- is_sub ? ARITH_SUB : ARITH_ADD);
- tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
- } else {
- tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
- tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
- }
+static void tgen_setcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, false);
+}
+
+static const TCGOutOpSetcond outop_setcond = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_setcond,
+ .out_rri = tgen_setcondi,
+};
+
+static void tgen_negsetcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, TCGReg arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, false, true);
+}
+
+static void tgen_negsetcondi(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg dest, TCGReg arg1, tcg_target_long arg2)
+{
+ tcg_out_setcond(s, type, cond, dest, arg1, arg2, true, true);
+}
+
+static const TCGOutOpSetcond outop_negsetcond = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_negsetcond,
+ .out_rri = tgen_negsetcondi,
+};
+
+static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond,
+ TCGReg ret, TCGReg c1, TCGArg c2, bool c2const,
+ TCGArg v1, bool v1const, TCGArg v2, bool v2const)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_movcond_i32(s, cond, ret, c1, c2, c2const, v1, v1const);
} else {
- /*
- * Otherwise adjust BH as if there is carry into T2.
- * Note that constant BH is constrained to 11 bits for the MOVCC,
- * so the adjustment fits 12 bits.
- */
- if (bhconst) {
- tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
- } else {
- tcg_out_arithi(s, TCG_REG_T2, bh, 1,
- is_sub ? ARITH_SUB : ARITH_ADD);
- }
- /* ... smoosh T2 back to original BH if carry is clear ... */
- tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
- /* ... and finally perform the arithmetic with the new operand. */
- tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
+ tcg_out_movcond_i64(s, cond, ret, c1, c2, c2const, v1, v1const);
}
-
- tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
+static const TCGOutOpMovcond outop_movcond = {
+ .base.static_constraint = C_O1_I4(r, r, rJ, rI, 0),
+ .out = tgen_movcond,
+};
+
static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
bool in_prologue, bool tail_call)
{
@@ -935,7 +959,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
tcg_out_nop(s);
}
-static void tcg_out_mb(TCGContext *s, TCGArg a0)
+static void tcg_out_mb(TCGContext *s, unsigned a0)
{
/* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
@@ -1166,8 +1190,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
return ldst;
}
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi, TCGType data_type)
+static void tgen_qemu_ld(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
static const int ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
[MO_UB] = LDUB,
@@ -1199,14 +1223,23 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
ld_opc[get_memop(oi) & (MO_BSWAP | MO_SSIZE)]);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
-static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
- MemOpIdx oi, TCGType data_type)
+static const TCGOutOpQemuLdSt outop_qemu_ld = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_qemu_ld,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_qemu_st(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg addr, MemOpIdx oi)
{
static const int st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
[MO_UB] = STB,
@@ -1229,12 +1262,21 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
st_opc[get_memop(oi) & (MO_BSWAP | MO_SIZE)]);
if (ldst) {
- ldst->type = data_type;
+ ldst->type = type;
ldst->datalo_reg = data;
ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
}
+static const TCGOutOpQemuLdSt outop_qemu_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out = tgen_qemu_st,
+};
+
+static const TCGOutOpQemuLdSt2 outop_qemu_st2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
if (check_fit_ptr(a0, 13)) {
@@ -1280,369 +1322,794 @@ static void tcg_out_goto_tb(TCGContext *s, int which)
}
}
+static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0)
+{
+ tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
+ tcg_out_mov_delay(s, TCG_REG_TB, a0);
+}
+
void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, TCGType type,
- const TCGArg args[TCG_MAX_OP_ARGS],
- const int const_args[TCG_MAX_OP_ARGS])
+
+static void tgen_add(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- TCGArg a0, a1, a2;
- int c, c2;
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADD);
+}
- /* Hoist the loads of the most common arguments. */
- a0 = args[0];
- a1 = args[1];
- a2 = args[2];
- c2 = const_args[2];
+static void tgen_addi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADD);
+}
- switch (opc) {
- case INDEX_op_goto_ptr:
- tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
- tcg_out_mov_delay(s, TCG_REG_TB, a0);
- break;
- case INDEX_op_br:
- tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
- tcg_out_nop(s);
- break;
+static const TCGOutOpBinary outop_add = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_add,
+ .out_rri = tgen_addi,
+};
-#define OP_32_64(x) \
- glue(glue(case INDEX_op_, x), _i32): \
- glue(glue(case INDEX_op_, x), _i64)
+static void tgen_addco_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDCC);
+}
- OP_32_64(ld8u):
- tcg_out_ldst(s, a0, a1, a2, LDUB);
- break;
- OP_32_64(ld8s):
- tcg_out_ldst(s, a0, a1, a2, LDSB);
- break;
- OP_32_64(ld16u):
- tcg_out_ldst(s, a0, a1, a2, LDUH);
- break;
- OP_32_64(ld16s):
- tcg_out_ldst(s, a0, a1, a2, LDSH);
- break;
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- tcg_out_ldst(s, a0, a1, a2, LDUW);
- break;
- OP_32_64(st8):
- tcg_out_ldst(s, a0, a1, a2, STB);
- break;
- OP_32_64(st16):
- tcg_out_ldst(s, a0, a1, a2, STH);
- break;
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- tcg_out_ldst(s, a0, a1, a2, STW);
- break;
- OP_32_64(add):
- c = ARITH_ADD;
- goto gen_arith;
- OP_32_64(sub):
- c = ARITH_SUB;
- goto gen_arith;
- OP_32_64(and):
- c = ARITH_AND;
- goto gen_arith;
- OP_32_64(andc):
- c = ARITH_ANDN;
- goto gen_arith;
- OP_32_64(or):
- c = ARITH_OR;
- goto gen_arith;
- OP_32_64(orc):
- c = ARITH_ORN;
- goto gen_arith;
- OP_32_64(xor):
- c = ARITH_XOR;
- goto gen_arith;
- case INDEX_op_shl_i32:
- c = SHIFT_SLL;
- do_shift32:
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
- break;
- case INDEX_op_shr_i32:
- c = SHIFT_SRL;
- goto do_shift32;
- case INDEX_op_sar_i32:
- c = SHIFT_SRA;
- goto do_shift32;
- case INDEX_op_mul_i32:
- c = ARITH_UMUL;
- goto gen_arith;
-
- OP_32_64(neg):
- c = ARITH_SUB;
- goto gen_arith1;
- OP_32_64(not):
- c = ARITH_ORN;
- goto gen_arith1;
-
- case INDEX_op_div_i32:
- tcg_out_div32(s, a0, a1, a2, c2, 0);
- break;
- case INDEX_op_divu_i32:
- tcg_out_div32(s, a0, a1, a2, c2, 1);
- break;
+static void tgen_addco_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCC);
+}
- case INDEX_op_brcond_i32:
- tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
- break;
- case INDEX_op_setcond_i32:
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, false);
- break;
- case INDEX_op_negsetcond_i32:
- tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2, true);
- break;
- case INDEX_op_movcond_i32:
- tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
- break;
+static const TCGOutOpBinary outop_addco = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_addco_rrr,
+ .out_rri = tgen_addco_rri,
+};
- case INDEX_op_add2_i32:
- tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
- args[4], const_args[4], args[5], const_args[5],
- ARITH_ADDCC, ARITH_ADDC);
- break;
- case INDEX_op_sub2_i32:
- tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
- args[4], const_args[4], args[5], const_args[5],
- ARITH_SUBCC, ARITH_SUBC);
- break;
- case INDEX_op_mulu2_i32:
- c = ARITH_UMUL;
- goto do_mul2;
- case INDEX_op_muls2_i32:
- c = ARITH_SMUL;
- do_mul2:
- /* The 32-bit multiply insns produce a full 64-bit result. */
- tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
- tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
- break;
+static void tgen_addci_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDC);
+ } else if (use_vis3_instructions) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDXC);
+ } else {
+ tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD); /* for CS */
+ /* Select the correct result based on actual carry value. */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
+ }
+}
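/*
 * Editor's illustration (not part of the patch): without ADDXC, the
 * carry-in add computes both candidate results and lets MOVCC keep
 * the one matching the live carry flag.  A minimal C model:
 *
 *     static uint64_t addci_model(uint64_t a, uint64_t b, int cin)
 *     {
 *         uint64_t t1 = a + b;      // kept when %xcc.c is clear (CC)
 *         uint64_t t2 = a + b + 1;  // kept when %xcc.c is set (CS)
 *         return cin ? t2 : t1;
 *     }
 */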
- case INDEX_op_qemu_ld_i32:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_ld_i64:
- tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
- break;
- case INDEX_op_qemu_st_i32:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
- break;
- case INDEX_op_qemu_st_i64:
- tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
- break;
+static void tgen_addci_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDC);
+ return;
+ }
+ /* !use_vis3_instructions */
+ if (a2 != 0) {
+ tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_ADD); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_ADD); /* for CS */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
+ } else if (a0 == a1) {
+ tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_ADD);
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false);
+ } else {
+ tcg_out_arithi(s, a0, a1, 1, ARITH_ADD);
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false);
+ }
+}
- case INDEX_op_ld32s_i64:
- tcg_out_ldst(s, a0, a1, a2, LDSW);
- break;
- case INDEX_op_ld_i64:
- tcg_out_ldst(s, a0, a1, a2, LDX);
- break;
- case INDEX_op_st_i64:
- tcg_out_ldst(s, a0, a1, a2, STX);
- break;
- case INDEX_op_shl_i64:
- c = SHIFT_SLLX;
- do_shift64:
- /* Limit immediate shift count lest we create an illegal insn. */
- tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
- break;
- case INDEX_op_shr_i64:
- c = SHIFT_SRLX;
- goto do_shift64;
- case INDEX_op_sar_i64:
- c = SHIFT_SRAX;
- goto do_shift64;
- case INDEX_op_mul_i64:
- c = ARITH_MULX;
- goto gen_arith;
- case INDEX_op_div_i64:
- c = ARITH_SDIVX;
- goto gen_arith;
- case INDEX_op_divu_i64:
- c = ARITH_UDIVX;
- goto gen_arith;
-
- case INDEX_op_brcond_i64:
- tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
- break;
- case INDEX_op_setcond_i64:
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, false);
- break;
- case INDEX_op_negsetcond_i64:
- tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2, true);
- break;
- case INDEX_op_movcond_i64:
- tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
- break;
- case INDEX_op_add2_i64:
- tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
- const_args[4], args[5], const_args[5], false);
- break;
- case INDEX_op_sub2_i64:
- tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
- const_args[4], args[5], const_args[5], true);
- break;
- case INDEX_op_muluh_i64:
- tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
- break;
+static TCGConstraintSetIndex cset_addci(TCGType type, unsigned flags)
+{
+ if (use_vis3_instructions && type == TCG_TYPE_I64) {
+ /* Note that ADDXC doesn't accept immediates. */
+ return C_O1_I2(r, rz, rz);
+ }
+ return C_O1_I2(r, rz, rJ);
+}
- gen_arith:
- tcg_out_arithc(s, a0, a1, a2, c2, c);
- break;
+static const TCGOutOpAddSubCarry outop_addci = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addci,
+ .out_rrr = tgen_addci_rrr,
+ .out_rri = tgen_addci_rri,
+};
- gen_arith1:
- tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
- break;
+/* Copy %xcc.c to %icc.c */
+static void tcg_out_dup_xcc_c(TCGContext *s)
+{
+ if (use_vis3_instructions) {
+ tcg_out_arith(s, TCG_REG_T1, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
+ } else {
+ tcg_out_movi_s13(s, TCG_REG_T1, 0);
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, TCG_REG_T1, 1, true);
+ }
+ /* Write carry-in into %icc via {0,1} + -1. */
+ tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, -1, ARITH_ADDCC);
+}
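/*
 * Editor's illustration: the "{0,1} + -1" step reproduces %xcc.c in
 * %icc.c because adding all-ones to 1 wraps and sets carry, while
 * adding it to 0 does not:
 *
 *     static int carry32(uint32_t a, uint32_t b)   // needs <stdint.h>
 *     {
 *         return (uint64_t)a + b > UINT32_MAX;
 *     }
 *     carry32(1, UINT32_MAX) == 1      T1 = 1: carry was set
 *     carry32(0, UINT32_MAX) == 0      T1 = 0: carry was clear
 */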
- case INDEX_op_mb:
- tcg_out_mb(s, a0);
- break;
+static void tgen_addcio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ if (use_vis3_instructions) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDXCCC);
+ return;
+ }
+ tcg_out_dup_xcc_c(s);
+ }
+ tcg_out_arith(s, a0, a1, a2, ARITH_ADDCCC);
+}
- case INDEX_op_extract_i64:
- tcg_debug_assert(a2 + args[3] == 32);
- tcg_out_arithi(s, a0, a1, a2, SHIFT_SRL);
- break;
- case INDEX_op_sextract_i64:
- tcg_debug_assert(a2 + args[3] == 32);
- tcg_out_arithi(s, a0, a1, a2, SHIFT_SRA);
- break;
+static void tgen_addcio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type != TCG_TYPE_I32) {
+ /* !use_vis3_instructions */
+ tcg_out_dup_xcc_c(s);
+ }
+ tcg_out_arithi(s, a0, a1, a2, ARITH_ADDCCC);
+}
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
- case INDEX_op_mov_i64:
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
- case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
- case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
- case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
- case INDEX_op_ext8s_i64:
- case INDEX_op_ext8u_i32:
- case INDEX_op_ext8u_i64:
- case INDEX_op_ext16s_i32:
- case INDEX_op_ext16s_i64:
- case INDEX_op_ext16u_i32:
- case INDEX_op_ext16u_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- default:
- g_assert_not_reached();
+static TCGConstraintSetIndex cset_addcio(TCGType type, unsigned flags)
+{
+ if (use_vis3_instructions && type == TCG_TYPE_I64) {
+ /* Note that ADDXCCC doesn't accept immediates. */
+ return C_O1_I2(r, rz, rz);
}
+ return C_O1_I2(r, rz, rJ);
}
-static TCGConstraintSetIndex
-tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
+static const TCGOutOpBinary outop_addcio = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_addcio,
+ .out_rrr = tgen_addcio_rrr,
+ .out_rri = tgen_addcio_rri,
+};
+
+static void tcg_out_set_carry(TCGContext *s)
+{
+ /* 0x11 -> xcc = nzvC, icc = nzvC */
+ tcg_out_arithi(s, 0, TCG_REG_G0, 0x11, WRCCR);
+}
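/*
 * Editor's note: %ccr packs xcc into bits 7:4 and icc into bits 3:0,
 * each as N|Z|V|C with C at bit 0, so 0x11 sets exactly the two carry
 * bits:
 *
 *     enum { CCR_ICC_C = 1 << 0, CCR_XCC_C = 1 << 4 };
 *     _Static_assert((CCR_ICC_C | CCR_XCC_C) == 0x11,
 *                    "WRCCR 0x11 sets both carries");
 */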
+
+static void tgen_and(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_AND);
+}
+
+static void tgen_andi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_AND);
+}
+
+static const TCGOutOpBinary outop_and = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_and,
+ .out_rri = tgen_andi,
+};
+
+static void tgen_andc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
{
- switch (op) {
- case INDEX_op_goto_ptr:
- return C_O0_I1(r);
-
- case INDEX_op_ld8u_i32:
- case INDEX_op_ld8u_i64:
- case INDEX_op_ld8s_i32:
- case INDEX_op_ld8s_i64:
- case INDEX_op_ld16u_i32:
- case INDEX_op_ld16u_i64:
- case INDEX_op_ld16s_i32:
- case INDEX_op_ld16s_i64:
- case INDEX_op_ld_i32:
- case INDEX_op_ld32u_i64:
- case INDEX_op_ld32s_i64:
- case INDEX_op_ld_i64:
- case INDEX_op_neg_i32:
- case INDEX_op_neg_i64:
- case INDEX_op_not_i32:
- case INDEX_op_not_i64:
- case INDEX_op_ext32s_i64:
- case INDEX_op_ext32u_i64:
- case INDEX_op_ext_i32_i64:
- case INDEX_op_extu_i32_i64:
- case INDEX_op_extract_i64:
- case INDEX_op_sextract_i64:
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
+ tcg_out_arith(s, a0, a1, a2, ARITH_ANDN);
+}
+
+static const TCGOutOpBinary outop_andc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_andc,
+};
+
+static const TCGOutOpBinary outop_clz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_ctpop(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tcg_out_arith(s, a0, TCG_REG_G0, a1, ARITH_POPC);
+}
+
+static TCGConstraintSetIndex cset_ctpop(TCGType type, unsigned flags)
+{
+ if (use_popc_instructions && type == TCG_TYPE_I64) {
return C_O1_I1(r, r);
+ }
+ return C_NotImplemented;
+}
- case INDEX_op_st8_i32:
- case INDEX_op_st8_i64:
- case INDEX_op_st16_i32:
- case INDEX_op_st16_i64:
- case INDEX_op_st_i32:
- case INDEX_op_st32_i64:
- case INDEX_op_st_i64:
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st_i64:
- return C_O0_I2(rz, r);
-
- case INDEX_op_add_i32:
- case INDEX_op_add_i64:
- case INDEX_op_mul_i32:
- case INDEX_op_mul_i64:
- case INDEX_op_div_i32:
- case INDEX_op_div_i64:
- case INDEX_op_divu_i32:
- case INDEX_op_divu_i64:
- case INDEX_op_sub_i32:
- case INDEX_op_sub_i64:
- case INDEX_op_and_i32:
- case INDEX_op_and_i64:
- case INDEX_op_andc_i32:
- case INDEX_op_andc_i64:
- case INDEX_op_or_i32:
- case INDEX_op_or_i64:
- case INDEX_op_orc_i32:
- case INDEX_op_orc_i64:
- case INDEX_op_xor_i32:
- case INDEX_op_xor_i64:
- case INDEX_op_shl_i32:
- case INDEX_op_shl_i64:
- case INDEX_op_shr_i32:
- case INDEX_op_shr_i64:
- case INDEX_op_sar_i32:
- case INDEX_op_sar_i64:
- case INDEX_op_setcond_i32:
- case INDEX_op_setcond_i64:
- case INDEX_op_negsetcond_i32:
- case INDEX_op_negsetcond_i64:
- return C_O1_I2(r, rz, rJ);
-
- case INDEX_op_brcond_i32:
- case INDEX_op_brcond_i64:
- return C_O0_I2(rz, rJ);
- case INDEX_op_movcond_i32:
- case INDEX_op_movcond_i64:
- return C_O1_I4(r, rz, rJ, rI, 0);
- case INDEX_op_add2_i32:
- case INDEX_op_add2_i64:
- case INDEX_op_sub2_i32:
- case INDEX_op_sub2_i64:
- return C_O2_I4(r, r, rz, rz, rJ, rJ);
- case INDEX_op_mulu2_i32:
- case INDEX_op_muls2_i32:
- return C_O2_I2(r, r, rz, rJ);
- case INDEX_op_muluh_i64:
- return C_O1_I2(r, r, r);
+static const TCGOutOpUnary outop_ctpop = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_ctpop,
+ .out_rr = tgen_ctpop,
+};
- default:
- return C_NotImplemented;
+static const TCGOutOpBinary outop_ctz = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divs_rJ(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
+{
+ uint32_t insn;
+
+ if (type == TCG_TYPE_I32) {
+ /* Load Y with the sign extension of a1 to 64-bits. */
+ tcg_out_arithi(s, TCG_REG_T1, a1, 31, SHIFT_SRA);
+ tcg_out_sety(s, TCG_REG_T1);
+ insn = ARITH_SDIV;
+ } else {
+ insn = ARITH_SDIVX;
+ }
+ tcg_out_arithc(s, a0, a1, a2, c2, insn);
+}
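/*
 * Editor's sketch (not patch code): the 32-bit SDIV divides the
 * 64-bit quantity Y:rs1 by rs2, hence Y must hold the sign extension
 * of rs1 for an ordinary 32/32 signed divide:
 *
 *     static int32_t sdiv_model(int32_t rs1, int32_t rs2)
 *     {
 *         int64_t dividend = rs1;   // Y:rs1 after the SRA + WRY
 *         return dividend / rs2;
 *     }
 */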
+
+static void tgen_divs(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_divs_rJ(s, type, a0, a1, a2, false);
+}
+
+static void tgen_divsi(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_divs_rJ(s, type, a0, a1, a2, true);
+}
+
+static const TCGOutOpBinary outop_divs = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_divs,
+ .out_rri = tgen_divsi,
+};
+
+static const TCGOutOpDivRem outop_divs2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_divu_rJ(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGArg a2, bool c2)
+{
+ uint32_t insn;
+
+ if (type == TCG_TYPE_I32) {
+ /* Load Y with the zero extension to 64-bits. */
+ tcg_out_sety(s, TCG_REG_G0);
+ insn = ARITH_UDIV;
+ } else {
+ insn = ARITH_UDIVX;
+ }
+ tcg_out_arithc(s, a0, a1, a2, c2, insn);
+}
+
+static void tgen_divu(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tgen_divu_rJ(s, type, a0, a1, a2, false);
+}
+
+static void tgen_divui(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tgen_divu_rJ(s, type, a0, a1, a2, true);
+}
+
+static const TCGOutOpBinary outop_divu = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_divu,
+ .out_rri = tgen_divui,
+};
+
+static const TCGOutOpDivRem outop_divu2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_eqv = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1)
+{
+ tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
+}
+
+static const TCGOutOpUnary outop_extrh_i64_i32 = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extrh_i64_i32,
+};
+
+static void tgen_mul(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
+ tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_muli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? ARITH_UMUL : ARITH_MULX;
+ tcg_out_arithi(s, a0, a1, a2, insn);
+}
+
+static const TCGOutOpBinary outop_mul = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_mul,
+ .out_rri = tgen_muli,
+};
+
+/*
+ * The 32-bit multiply insns produce a full 64-bit result.
+ * Supporting 32-bit mul[us]2 opcodes avoids sign/zero-extensions
+ * before the actual multiply; we only need to extract the high part
+ * into the separate operand.
+ */
+static TCGConstraintSetIndex cset_mul2(TCGType type, unsigned flags)
+{
+ return type == TCG_TYPE_I32 ? C_O2_I2(r, r, r, r) : C_NotImplemented;
+}
+
+static void tgen_muls2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_out_arith(s, a0, a2, a3, ARITH_SMUL);
+ tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
+}
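/*
 * Editor's illustration: SMUL leaves the full 64-bit product in the
 * destination register, so the SRLX above recovers the high half.
 * A model of the two-insn sequence:
 *
 *     static void muls2_model(int32_t x, int32_t y,
 *                             uint32_t *lo, uint32_t *hi)
 *     {
 *         uint64_t p = (uint64_t)((int64_t)x * y);   // SMUL
 *         *lo = (uint32_t)p;                         // low word, in a0
 *         *hi = (uint32_t)(p >> 32);                 // SRLX a0, 32 -> a1
 *     }
 */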
+
+static const TCGOutOpMul2 outop_muls2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_muls2,
+};
+
+static const TCGOutOpBinary outop_mulsh = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_mulu2(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2, TCGReg a3)
+{
+ tcg_out_arith(s, a0, a2, a3, ARITH_UMUL);
+ tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
+}
+
+static const TCGOutOpMul2 outop_mulu2 = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_mul2,
+ .out_rrrr = tgen_mulu2,
+};
+
+static void tgen_muluh(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_UMULXHI);
+}
+
+static TCGConstraintSetIndex cset_muluh(TCGType type, unsigned flags)
+{
+ return (type == TCG_TYPE_I64 && use_vis3_instructions
+ ? C_O1_I2(r, r, r) : C_NotImplemented);
+}
+
+static const TCGOutOpBinary outop_muluh = {
+ .base.static_constraint = C_Dynamic,
+ .base.dynamic_constraint = cset_muluh,
+ .out_rrr = tgen_muluh,
+};
+
+static const TCGOutOpBinary outop_nand = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_nor = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_or(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_OR);
+}
+
+static void tgen_ori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_OR);
+}
+
+static const TCGOutOpBinary outop_or = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_or,
+ .out_rri = tgen_ori,
+};
+
+static void tgen_orc(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_ORN);
+}
+
+static const TCGOutOpBinary outop_orc = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_orc,
+};
+
+static const TCGOutOpBinary outop_rems = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_remu = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_rotl = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBinary outop_rotr = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_sar(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
+ tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_sari(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRA : SHIFT_SRAX;
+ uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_arithi(s, a0, a1, a2 & mask, insn);
+}
+
+static const TCGOutOpBinary outop_sar = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_sar,
+ .out_rri = tgen_sari,
+};
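/*
 * Editor's note: the "& 31" / "& 63" masks in the immediate paths keep
 * the old "limit immediate shift count lest we create an illegal insn"
 * behavior: SLL/SRL/SRA encode a 5-bit count and the X forms a 6-bit
 * count, e.g. (40 & 31) == 8 while (40 & 63) == 40.
 */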
+
+static void tgen_shl(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
+ tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_shli(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SLL : SHIFT_SLLX;
+ uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_arithi(s, a0, a1, a2 & mask, insn);
+}
+
+static const TCGOutOpBinary outop_shl = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_shl,
+ .out_rri = tgen_shli,
+};
+
+static void tgen_shr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
+ tcg_out_arith(s, a0, a1, a2, insn);
+}
+
+static void tgen_shri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ uint32_t insn = type == TCG_TYPE_I32 ? SHIFT_SRL : SHIFT_SRLX;
+ uint32_t mask = type == TCG_TYPE_I32 ? 31 : 63;
+ tcg_out_arithi(s, a0, a1, a2 & mask, insn);
+}
+
+static const TCGOutOpBinary outop_shr = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_shr,
+ .out_rri = tgen_shri,
+};
+
+static void tgen_sub(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUB);
+}
+
+static const TCGOutOpSubtract outop_sub = {
+ .base.static_constraint = C_O1_I2(r, r, r),
+ .out_rrr = tgen_sub,
+};
+
+static void tgen_subbo_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBCC);
+}
+
+static void tgen_subbo_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCC);
+}
+
+static const TCGOutOpAddSubCarry outop_subbo = {
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
+ .out_rrr = tgen_subbo_rrr,
+ .out_rri = tgen_subbo_rri,
+};
+
+static void tgen_subbi_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ /* TODO: OSA 2015 added SUBXC */
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBC);
+ } else {
+ tcg_out_arith(s, TCG_REG_T1, a1, a2, ARITH_SUB); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB); /* for CS */
+ /* Select the correct result based on actual borrow value. */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
}
}
+static void tgen_subbi_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type == TCG_TYPE_I32) {
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBC);
+ } else if (a2 != 0) {
+ tcg_out_arithi(s, TCG_REG_T1, a1, a2, ARITH_SUB); /* for CC */
+ tcg_out_arithi(s, a0, TCG_REG_T1, 1, ARITH_SUB); /* for CS */
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, TCG_REG_T1, false);
+ } else if (a0 == a1) {
+ tcg_out_arithi(s, TCG_REG_T1, a1, 1, ARITH_SUB);
+ tcg_out_movcc(s, COND_CS, MOVCC_XCC, a0, TCG_REG_T1, false);
+ } else {
+ tcg_out_arithi(s, a0, a1, 1, ARITH_SUB);
+ tcg_out_movcc(s, COND_CC, MOVCC_XCC, a0, a1, false);
+ }
+}
+
+static const TCGOutOpAddSubCarry outop_subbi = {
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
+ .out_rrr = tgen_subbi_rrr,
+ .out_rri = tgen_subbi_rri,
+};
+
+static void tgen_subbio_rrr(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ if (type != TCG_TYPE_I32) {
+ /* TODO: OSA 2015 added SUBXCCC */
+ tcg_out_dup_xcc_c(s);
+ }
+ tcg_out_arith(s, a0, a1, a2, ARITH_SUBCCC);
+}
+
+static void tgen_subbio_rri(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ if (type != TCG_TYPE_I32) {
+ tcg_out_dup_xcc_c(s);
+ }
+ tcg_out_arithi(s, a0, a1, a2, ARITH_SUBCCC);
+}
+
+static const TCGOutOpAddSubCarry outop_subbio = {
+ .base.static_constraint = C_O1_I2(r, rz, rJ),
+ .out_rrr = tgen_subbio_rrr,
+ .out_rri = tgen_subbio_rri,
+};
+
+static void tcg_out_set_borrow(TCGContext *s)
+{
+ tcg_out_set_carry(s); /* borrow == carry */
+}
+
+static void tgen_xor(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, TCGReg a2)
+{
+ tcg_out_arith(s, a0, a1, a2, ARITH_XOR);
+}
+
+static void tgen_xori(TCGContext *s, TCGType type,
+ TCGReg a0, TCGReg a1, tcg_target_long a2)
+{
+ tcg_out_arithi(s, a0, a1, a2, ARITH_XOR);
+}
+
+static const TCGOutOpBinary outop_xor = {
+ .base.static_constraint = C_O1_I2(r, r, rJ),
+ .out_rrr = tgen_xor,
+ .out_rri = tgen_xori,
+};
+
+static const TCGOutOpBswap outop_bswap16 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpBswap outop_bswap32 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static const TCGOutOpUnary outop_bswap64 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_sub(s, type, a0, TCG_REG_G0, a1);
+}
+
+static const TCGOutOpUnary outop_neg = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_neg,
+};
+
+static void tgen_not(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1)
+{
+ tgen_orc(s, type, a0, TCG_REG_G0, a1);
+}
+
+static const TCGOutOpUnary outop_not = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_not,
+};
+
+static const TCGOutOpDeposit outop_deposit = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_extract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ tcg_debug_assert(ofs + len == 32);
+ tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRL);
+}
+
+static const TCGOutOpExtract outop_extract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_extract,
+};
+
+static void tgen_sextract(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1,
+ unsigned ofs, unsigned len)
+{
+ tcg_debug_assert(ofs + len == 32);
+ tcg_out_arithi(s, a0, a1, ofs, SHIFT_SRA);
+}
+
+static const TCGOutOpExtract outop_sextract = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out_rr = tgen_sextract,
+};
+
+static const TCGOutOpExtract2 outop_extract2 = {
+ .base.static_constraint = C_NotImplemented,
+};
+
+static void tgen_ld8u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDUB);
+}
+
+static const TCGOutOpLoad outop_ld8u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8u,
+};
+
+static void tgen_ld8s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDSB);
+}
+
+static const TCGOutOpLoad outop_ld8s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld8s,
+};
+
+static void tgen_ld16u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDUH);
+}
+
+static const TCGOutOpLoad outop_ld16u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16u,
+};
+
+static void tgen_ld16s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDSH);
+}
+
+static const TCGOutOpLoad outop_ld16s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld16s,
+};
+
+static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDUW);
+}
+
+static const TCGOutOpLoad outop_ld32u = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32u,
+};
+
+static void tgen_ld32s(TCGContext *s, TCGType type, TCGReg dest,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, dest, base, offset, LDSW);
+}
+
+static const TCGOutOpLoad outop_ld32s = {
+ .base.static_constraint = C_O1_I1(r, r),
+ .out = tgen_ld32s,
+};
+
+static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, data, base, offset, STB);
+}
+
+static const TCGOutOpStore outop_st8 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st8_r,
+};
+
+static void tgen_st16_r(TCGContext *s, TCGType type, TCGReg data,
+ TCGReg base, ptrdiff_t offset)
+{
+ tcg_out_ldst(s, data, base, offset, STH);
+}
+
+static const TCGOutOpStore outop_st16 = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tgen_st16_r,
+};
+
+static const TCGOutOpStore outop_st = {
+ .base.static_constraint = C_O0_I2(rz, r),
+ .out_r = tcg_out_st,
+};
+
+static TCGConstraintSetIndex
+tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags)
+{
+ return C_NotImplemented;
+}
+
static void tcg_target_init(TCGContext *s)
{
+ unsigned long hwcap = qemu_getauxval(AT_HWCAP);
+
/*
* Only probe for the platform and capabilities if we haven't already
* determined maximum values at compile time.
*/
+ use_popc_instructions = (hwcap & HWCAP_SPARC_POPC) != 0;
#ifndef use_vis3_instructions
- {
- unsigned long hwcap = qemu_getauxval(AT_HWCAP);
- use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
- }
+ use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
#endif
tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;