From 42281ec646f906aaa63e28daf2f6ba7ca2dd7caf Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:34 +0200 Subject: tcg/ppc: Introduce Altivec registers Altivec supports 32 128-bit vector registers, whose names are by convention v0 through v31. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.h | 11 +++++- tcg/ppc/tcg-target.inc.c | 88 ++++++++++++++++++++++++++++++------------------ 2 files changed, 65 insertions(+), 34 deletions(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 7627fb6..690fa74 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -31,7 +31,7 @@ # define TCG_TARGET_REG_BITS 32 #endif -#define TCG_TARGET_NB_REGS 32 +#define TCG_TARGET_NB_REGS 64 #define TCG_TARGET_INSN_UNIT_SIZE 4 #define TCG_TARGET_TLB_DISPLACEMENT_BITS 16 @@ -45,6 +45,15 @@ typedef enum { TCG_REG_R24, TCG_REG_R25, TCG_REG_R26, TCG_REG_R27, TCG_REG_R28, TCG_REG_R29, TCG_REG_R30, TCG_REG_R31, + TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, + TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, + TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, + TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, + TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, + TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, + TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, + TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, + TCG_REG_CALL_STACK = TCG_REG_R1, TCG_AREG0 = TCG_REG_R27 } TCGReg; diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 815edac..9d678c3 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -42,6 +42,9 @@ # define TCG_REG_TMP1 TCG_REG_R12 #endif +#define TCG_VEC_TMP1 TCG_REG_V0 +#define TCG_VEC_TMP2 TCG_REG_V1 + #define TCG_REG_TB TCG_REG_R31 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64) @@ -72,39 +75,15 @@ bool have_isa_3_00; #endif #ifdef CONFIG_DEBUG_TCG -static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "r0", - "r1", - "r2", - "r3", - "r4", - "r5", - "r6", - "r7", - "r8", - "r9", - "r10", - "r11", - "r12", - "r13", - "r14", - "r15", - "r16", - "r17", - "r18", - "r19", - "r20", - "r21", - "r22", - "r23", - "r24", - "r25", - "r26", - "r27", - "r28", - "r29", - "r30", - "r31" +static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", + "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", + "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", }; #endif @@ -139,6 +118,26 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_R5, TCG_REG_R4, TCG_REG_R3, + + /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */ + TCG_REG_V2, /* call clobbered, vectors */ + TCG_REG_V3, + TCG_REG_V4, + TCG_REG_V5, + TCG_REG_V6, + TCG_REG_V7, + TCG_REG_V8, + TCG_REG_V9, + TCG_REG_V10, + TCG_REG_V11, + TCG_REG_V12, + TCG_REG_V13, + TCG_REG_V14, + TCG_REG_V15, + TCG_REG_V16, + TCG_REG_V17, + TCG_REG_V18, + TCG_REG_V19, }; static const int tcg_target_call_iarg_regs[] = { @@ -2808,6 +2807,27 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11); tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12); + 
tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */ @@ -2818,6 +2838,8 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */ #endif tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */ + tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1); + tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2); if (USE_REG_TB) { tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */ } -- cgit v1.1 From 1838905eb3a6283b14b67e71bc1576d6b75ceadc Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:36 +0200 Subject: tcg/ppc: Introduce macro VX4() Introduce macro VX4() used for encoding Altivec instructions. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.inc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 9d678c3..8dc5455 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -319,6 +319,7 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define XO31(opc) (OPCD(31)|((opc)<<1)) #define XO58(opc) (OPCD(58)|(opc)) #define XO62(opc) (OPCD(62)|(opc)) +#define VX4(opc) (OPCD(4)|(opc)) #define B OPCD( 18) #define BC OPCD( 16) -- cgit v1.1 From b82f769cc16b4ee7b628e7a923d3b09eb1d85a80 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:37 +0200 Subject: tcg/ppc: Introduce macros VRT(), VRA(), VRB(), VRC() Introduce macros VRT(), VRA(), VRB(), VRC() used for encoding elements of Altivec instructions. 
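As a concrete illustration (a standalone sketch, not part of the patch itself), the opcode macro and these field macros compose into a 32-bit instruction word as follows, shown for vaddubm v2,v3,v4:

    #include <stdint.h>
    #include <stdio.h>

    #define OPCD(opc) ((uint32_t)(opc) << 26)
    #define VX4(opc)  (OPCD(4) | (opc))   /* VX-form: primary opcode 4 */
    #define VRT(r)    (((r) & 31) << 21)  /* vector target register   */
    #define VRA(r)    (((r) & 31) << 16)  /* vector source register A */
    #define VRB(r)    (((r) & 31) << 11)  /* vector source register B */

    int main(void)
    {
        /* vaddubm v2,v3,v4: primary opcode 4, extended opcode 0 */
        uint32_t insn = VX4(0) | VRT(2) | VRA(3) | VRB(4);
        printf("0x%08x\n", (unsigned)insn);   /* prints 0x10432000 */
        return 0;
    }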
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.inc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 8dc5455..4aad5d2 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -473,6 +473,11 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define MB64(b) ((b)<<5) #define FXM(b) (1 << (19 - (b))) +#define VRT(r) (((r) & 31) << 21) +#define VRA(r) (((r) & 31) << 16) +#define VRB(r) (((r) & 31) << 11) +#define VRC(r) (((r) & 31) << 6) + #define LK 1 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b)) -- cgit v1.1 From 7d9dae0a102bc41ea031b358b47c243c5bc6ced9 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 02:52:00 +0000 Subject: tcg/ppc: Create TCGPowerISA and have_isa Introduce an enum to hold base < 2.06 < 3.00. Use macros to preserve the existing have_isa_2_06 and have_isa_3_00 predicates. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.h | 12 ++++++++++-- tcg/ppc/tcg-target.inc.c | 8 ++++---- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 690fa74..35ba869 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -58,8 +58,16 @@ typedef enum { TCG_AREG0 = TCG_REG_R27 } TCGReg; -extern bool have_isa_2_06; -extern bool have_isa_3_00; +typedef enum { + tcg_isa_base, + tcg_isa_2_06, + tcg_isa_3_00, +} TCGPowerISA; + +extern TCGPowerISA have_isa; + +#define have_isa_2_06 (have_isa >= tcg_isa_2_06) +#define have_isa_3_00 (have_isa >= tcg_isa_3_00) /* optional instructions automatically implemented */ #define TCG_TARGET_HAS_ext8u_i32 0 /* andi */ diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 4aad5d2..0bfaef9 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -64,8 +64,7 @@ static tcg_insn_unit *tb_ret_addr; -bool have_isa_2_06; -bool have_isa_3_00; +TCGPowerISA have_isa; #define HAVE_ISA_2_06 have_isa_2_06 #define HAVE_ISEL have_isa_2_06 @@ -2787,12 +2786,13 @@ static void tcg_target_init(TCGContext *s) unsigned long hwcap = qemu_getauxval(AT_HWCAP); unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2); + have_isa = tcg_isa_base; if (hwcap & PPC_FEATURE_ARCH_2_06) { - have_isa_2_06 = true; + have_isa = tcg_isa_2_06; } #ifdef PPC_FEATURE2_ARCH_3_00 if (hwcap2 & PPC_FEATURE2_ARCH_3_00) { - have_isa_3_00 = true; + have_isa = tcg_isa_3_00; } #endif -- cgit v1.1 From 4e33fe0137b51947f00d210dbd43b4f5b65956ae Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 02:56:34 +0000 Subject: tcg/ppc: Replace HAVE_ISA_2_06 This is identical to have_isa_2_06, so replace it. 
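The ordered enum is what makes this a pure rename: each predicate tests "at least this ISA level", so a newer level automatically satisfies every older predicate. A minimal sketch of the pattern, lifted from the definitions above:

    typedef enum {
        tcg_isa_base,
        tcg_isa_2_06,
        tcg_isa_3_00,
    } TCGPowerISA;

    static TCGPowerISA have_isa;

    /* A host reporting ISA 3.00 satisfies both predicates;
       a host reporting 2.06 satisfies only the first. */
    #define have_isa_2_06 (have_isa >= tcg_isa_2_06)
    #define have_isa_3_00 (have_isa >= tcg_isa_3_00)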
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 0bfaef9..7cb0002 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -66,7 +66,6 @@ static tcg_insn_unit *tb_ret_addr; TCGPowerISA have_isa; -#define HAVE_ISA_2_06 have_isa_2_06 #define HAVE_ISEL have_isa_2_06 #ifndef CONFIG_SOFTMMU @@ -1797,7 +1796,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) } } else { uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)]; - if (!HAVE_ISA_2_06 && insn == LDBRX) { + if (!have_isa_2_06 && insn == LDBRX) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0)); @@ -1869,7 +1868,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) } } else { uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)]; - if (!HAVE_ISA_2_06 && insn == STDBRX) { + if (!have_isa_2_06 && insn == STDBRX) { tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4)); tcg_out_shri64(s, TCG_REG_R0, datalo, 32); -- cgit v1.1 From 63922f467a200dabc43be3eaf7edbec800365bb5 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 03:06:47 +0000 Subject: tcg/ppc: Replace HAVE_ISEL macro with a variable Previously we've been hard-coding knowledge that Power7 has ISEL, but it was an optional instruction before that. Use the AT_HWCAP2 bit, when present, to properly determine support. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 7cb0002..db28ae7 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -65,8 +65,7 @@ static tcg_insn_unit *tb_ret_addr; TCGPowerISA have_isa; - -#define HAVE_ISEL have_isa_2_06 +static bool have_isel; #ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG 30 @@ -1100,7 +1099,7 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, /* If we have ISEL, we can implement everything with 3 or 4 insns. All other cases below are also at least 3 insns, so speed up the code generator by not considering them and always using ISEL. */ - if (HAVE_ISEL) { + if (have_isel) { int isel, tab; tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type); @@ -1203,7 +1202,7 @@ static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond, tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type); - if (HAVE_ISEL) { + if (have_isel) { int isel = tcg_to_isel[cond]; /* Swap the V operands if the operation indicates inversion. */ @@ -1247,7 +1246,7 @@ static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc, } else { tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type); /* Note that the only other valid constant for a2 is 0. */ - if (HAVE_ISEL) { + if (have_isel) { tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1)); tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0)); } else if (!const_a2 && a0 == a2) { @@ -2795,6 +2794,14 @@ static void tcg_target_init(TCGContext *s) } #endif +#ifdef PPC_FEATURE2_HAS_ISEL + /* Prefer explicit instruction from the kernel. 
*/ + have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0; +#else + /* Fall back to knowing Power7 (2.06) has ISEL. */ + have_isel = have_isa_2_06; +#endif + tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; -- cgit v1.1 From 4b06c216826b7e4763afbecde12d3c79aecc6ce7 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:35 +0200 Subject: tcg/ppc: Enable tcg backend vector compilation Introduce all of the flags required to enable tcg backend vector support, and a runtime flag to indicate the host supports Altivec instructions. For now, do not actually set have_altivec to true, because we have not yet added all of the code to actually generate all of the required insns. However, we must define these flags in order to disable the ifndefs that create stub versions of the functions added here. The change to tcg_out_movi works around a buglet in tcg.c wherein if we do not define tcg_out_dupi_vec we get a declared-but-not-defined Werror, but if we only declare it we get a defined-but-not-used Werror. We need this change to tcg_out_movi eventually anyway, so it's no biggie. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.h | 25 +++++++++++++++++++ tcg/ppc/tcg-target.inc.c | 62 +++++++++++++++++++++++++++++++++++++++++++++--- tcg/ppc/tcg-target.opc.h | 5 ++++ 3 files changed, 89 insertions(+), 3 deletions(-) create mode 100644 tcg/ppc/tcg-target.opc.h diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 35ba869..498e950 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -65,6 +65,7 @@ typedef enum { } TCGPowerISA; extern TCGPowerISA have_isa; +extern bool have_altivec; #define have_isa_2_06 (have_isa >= tcg_isa_2_06) #define have_isa_3_00 (have_isa >= tcg_isa_3_00) @@ -143,6 +144,30 @@ extern TCGPowerISA have_isa; #define TCG_TARGET_HAS_mulsh_i64 1 #endif +/* + * While technically Altivec could support V64, it has no 64-bit store + * instruction and substituting two 32-bit stores makes the generated + * code quite large.
+ */ +#define TCG_TARGET_HAS_v64 0 +#define TCG_TARGET_HAS_v128 have_altivec +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 0 +#define TCG_TARGET_HAS_orc_vec 0 +#define TCG_TARGET_HAS_not_vec 0 +#define TCG_TARGET_HAS_neg_vec 0 +#define TCG_TARGET_HAS_abs_vec 0 +#define TCG_TARGET_HAS_shi_vec 0 +#define TCG_TARGET_HAS_shs_vec 0 +#define TCG_TARGET_HAS_shv_vec 0 +#define TCG_TARGET_HAS_cmp_vec 0 +#define TCG_TARGET_HAS_mul_vec 0 +#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_minmax_vec 0 +#define TCG_TARGET_HAS_bitsel_vec 0 +#define TCG_TARGET_HAS_cmpsel_vec 0 + void flush_icache_range(uintptr_t start, uintptr_t stop); void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t); diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index db28ae7..c7ce0f9 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -66,6 +66,7 @@ static tcg_insn_unit *tb_ret_addr; TCGPowerISA have_isa; static bool have_isel; +bool have_altivec; #ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG 30 @@ -714,10 +715,31 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, } } -static inline void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, - tcg_target_long arg) +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, + tcg_target_long val) { - tcg_out_movi_int(s, type, ret, arg, false); + g_assert_not_reached(); +} + +static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, + tcg_target_long arg) +{ + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + tcg_debug_assert(ret < TCG_REG_V0); + tcg_out_movi_int(s, type, ret, arg, false); + break; + + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_debug_assert(ret >= TCG_REG_V0); + tcg_out_dupi_vec(s, type, ret, arg); + break; + + default: + g_assert_not_reached(); + } } static bool mask_operand(uint32_t c, int *mb, int *me) @@ -2602,6 +2624,36 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, } } +int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) +{ + g_assert_not_reached(); +} + +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg src) +{ + g_assert_not_reached(); +} + +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg out, TCGReg base, intptr_t offset) +{ + g_assert_not_reached(); +} + +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, + unsigned vecl, unsigned vece, + const TCGArg *args, const int *const_args) +{ + g_assert_not_reached(); +} + +void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, + TCGArg a0, ...) +{ + g_assert_not_reached(); +} + static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) { static const TCGTargetOpDef r = { .args_ct_str = { "r" } }; @@ -2804,6 +2856,10 @@ static void tcg_target_init(TCGContext *s) tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; + if (have_altivec) { + tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; + tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull; + } tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); diff --git a/tcg/ppc/tcg-target.opc.h b/tcg/ppc/tcg-target.opc.h new file mode 100644 index 0000000..fa680dd --- /dev/null +++ b/tcg/ppc/tcg-target.opc.h @@ -0,0 +1,5 @@ +/* + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. 
For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ -- cgit v1.1 From 6ef14d7ebe81062c5cc6f1e9bd97f7882078521a Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:38 +0200 Subject: tcg/ppc: Add support for load/store/logic/comparison Add various bits and pieces related mostly to load and store operations. In that context, logic, compare, and splat Altivec instructions are used, and therefore support for emitting them is included in this patch as well. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.h | 6 +- tcg/ppc/tcg-target.inc.c | 468 +++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 440 insertions(+), 34 deletions(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 498e950..a0e59a5 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -153,15 +153,15 @@ extern bool have_altivec; #define TCG_TARGET_HAS_v128 have_altivec #define TCG_TARGET_HAS_v256 0 -#define TCG_TARGET_HAS_andc_vec 0 +#define TCG_TARGET_HAS_andc_vec 1 #define TCG_TARGET_HAS_orc_vec 0 -#define TCG_TARGET_HAS_not_vec 0 +#define TCG_TARGET_HAS_not_vec 1 #define TCG_TARGET_HAS_neg_vec 0 #define TCG_TARGET_HAS_abs_vec 0 #define TCG_TARGET_HAS_shi_vec 0 #define TCG_TARGET_HAS_shs_vec 0 #define TCG_TARGET_HAS_shv_vec 0 -#define TCG_TARGET_HAS_cmp_vec 0 +#define TCG_TARGET_HAS_cmp_vec 1 #define TCG_TARGET_HAS_mul_vec 0 #define TCG_TARGET_HAS_sat_vec 0 #define TCG_TARGET_HAS_minmax_vec 0 diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index c7ce0f9..1a8d7dc 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -230,6 +230,10 @@ static const char *target_parse_constraint(TCGArgConstraint *ct, ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; break; + case 'v': + ct->ct |= TCG_CT_REG; + ct->u.regs = 0xffffffff00000000ull; + break; case 'L': /* qemu_ld constraint */ ct->ct |= TCG_CT_REG; ct->u.regs = 0xffffffff; @@ -459,6 +463,39 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define NOP ORI /* ori 0,0,0 */ +#define LVX XO31(103) +#define LVEBX XO31(7) +#define LVEHX XO31(39) +#define LVEWX XO31(71) + +#define STVX XO31(231) +#define STVEWX XO31(199) + +#define VCMPEQUB VX4(6) +#define VCMPEQUH VX4(70) +#define VCMPEQUW VX4(134) +#define VCMPGTSB VX4(774) +#define VCMPGTSH VX4(838) +#define VCMPGTSW VX4(902) +#define VCMPGTUB VX4(518) +#define VCMPGTUH VX4(582) +#define VCMPGTUW VX4(646) + +#define VAND VX4(1028) +#define VANDC VX4(1092) +#define VNOR VX4(1284) +#define VOR VX4(1156) +#define VXOR VX4(1220) + +#define VSPLTB VX4(524) +#define VSPLTH VX4(588) +#define VSPLTW VX4(652) +#define VSPLTISB VX4(780) +#define VSPLTISH VX4(844) +#define VSPLTISW VX4(908) + +#define VSLDOI VX4(44) + #define RT(r) ((r)<<21) #define RS(r) ((r)<<21) #define RA(r) ((r)<<16) @@ -532,6 +569,8 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) { tcg_insn_unit *target; + int16_t lo; + int32_t hi; value += addend; target = (tcg_insn_unit *)value; @@ -553,6 +592,20 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type, } *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc); break; + case R_PPC_ADDR32: + /* + * We are abusing this relocation type. Again, this points to + * a pair of insns, lis + load. This is an absolute address + * relocation for PPC32 so the lis cannot be removed.
+ */ + lo = value; + hi = value - lo; + if (hi + lo != value) { + return false; + } + code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16); + code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo); + break; default: g_assert_not_reached(); } @@ -564,9 +617,29 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - if (ret != arg) { - tcg_out32(s, OR | SAB(arg, ret, arg)); + if (ret == arg) { + return true; + } + switch (type) { + case TCG_TYPE_I64: + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + /* fallthru */ + case TCG_TYPE_I32: + if (ret < TCG_REG_V0 && arg < TCG_REG_V0) { + tcg_out32(s, OR | SAB(arg, ret, arg)); + break; + } else if (ret < TCG_REG_V0 || arg < TCG_REG_V0) { + /* Altivec does not support vector/integer moves. */ + return false; + } + /* fallthru */ + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0); + tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg)); + break; + default: + g_assert_not_reached(); } return true; } @@ -718,7 +791,52 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long val) { - g_assert_not_reached(); + uint32_t load_insn; + int rel, low; + intptr_t add; + + low = (int8_t)val; + if (low >= -16 && low < 16) { + if (val == (tcg_target_long)dup_const(MO_8, low)) { + tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16)); + return; + } + if (val == (tcg_target_long)dup_const(MO_16, low)) { + tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16)); + return; + } + if (val == (tcg_target_long)dup_const(MO_32, low)) { + tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16)); + return; + } + } + + /* + * Otherwise we must load the value from the constant pool. + */ + if (USE_REG_TB) { + rel = R_PPC_ADDR16; + add = -(intptr_t)s->code_gen_ptr; + } else { + rel = R_PPC_ADDR32; + add = 0; + } + + load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1); + if (TCG_TARGET_REG_BITS == 64) { + new_pool_l2(s, rel, s->code_ptr, add, val, val); + } else { + new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val); + } + + if (USE_REG_TB) { + tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0)); + load_insn |= RA(TCG_REG_TB); + } else { + tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0)); + tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0)); + } + tcg_out32(s, load_insn); } static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, @@ -878,7 +996,7 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, align = 3; /* FALLTHRU */ default: - if (rt != TCG_REG_R0) { + if (rt > TCG_REG_R0 && rt < TCG_REG_V0) { rs = rt; break; } @@ -892,13 +1010,13 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, } /* For unaligned, or very large offsets, use the indexed form. 
*/ - if (offset & align || offset != (int32_t)offset) { + if (offset & align || offset != (int32_t)offset || opi == 0) { if (rs == base) { rs = TCG_REG_R0; } tcg_debug_assert(!is_store || rs != rt); tcg_out_movi(s, TCG_TYPE_PTR, rs, orig); - tcg_out32(s, opx | TAB(rt, base, rs)); + tcg_out32(s, opx | TAB(rt & 31, base, rs)); return; } @@ -919,36 +1037,102 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, base = rs; } if (opi != ADDI || base != rt || l0 != 0) { - tcg_out32(s, opi | TAI(rt, base, l0)); + tcg_out32(s, opi | TAI(rt & 31, base, l0)); } } -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, - TCGReg arg1, intptr_t arg2) +static void tcg_out_vsldoi(TCGContext *s, TCGReg ret, + TCGReg va, TCGReg vb, int shb) { - int opi, opx; + tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6)); +} - tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - if (type == TCG_TYPE_I32) { - opi = LWZ, opx = LWZX; - } else { - opi = LD, opx = LDX; +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, + TCGReg base, intptr_t offset) +{ + int shift; + + switch (type) { + case TCG_TYPE_I32: + if (ret < TCG_REG_V0) { + tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset); + break; + } + tcg_debug_assert((offset & 3) == 0); + tcg_out_mem_long(s, 0, LVEWX, ret, base, offset); + shift = (offset - 4) & 0xc; + if (shift) { + tcg_out_vsldoi(s, ret, ret, ret, shift); + } + break; + case TCG_TYPE_I64: + if (ret < TCG_REG_V0) { + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_out_mem_long(s, LD, LDX, ret, base, offset); + break; + } + /* fallthru */ + case TCG_TYPE_V64: + tcg_debug_assert(ret >= TCG_REG_V0); + tcg_debug_assert((offset & 7) == 0); + tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16); + if (offset & 8) { + tcg_out_vsldoi(s, ret, ret, ret, 8); + } + break; + case TCG_TYPE_V128: + tcg_debug_assert(ret >= TCG_REG_V0); + tcg_debug_assert((offset & 15) == 0); + tcg_out_mem_long(s, 0, LVX, ret, base, offset); + break; + default: + g_assert_not_reached(); } - tcg_out_mem_long(s, opi, opx, ret, arg1, arg2); } -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, - TCGReg arg1, intptr_t arg2) +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, + TCGReg base, intptr_t offset) { - int opi, opx; + int shift; - tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - if (type == TCG_TYPE_I32) { - opi = STW, opx = STWX; - } else { - opi = STD, opx = STDX; + switch (type) { + case TCG_TYPE_I32: + if (arg < TCG_REG_V0) { + tcg_out_mem_long(s, STW, STWX, arg, base, offset); + break; + } + tcg_debug_assert((offset & 3) == 0); + shift = (offset - 4) & 0xc; + if (shift) { + tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift); + arg = TCG_VEC_TMP1; + } + tcg_out_mem_long(s, 0, STVEWX, arg, base, offset); + break; + case TCG_TYPE_I64: + if (arg < TCG_REG_V0) { + tcg_debug_assert(TCG_TARGET_REG_BITS == 64); + tcg_out_mem_long(s, STD, STDX, arg, base, offset); + break; + } + /* fallthru */ + case TCG_TYPE_V64: + tcg_debug_assert(arg >= TCG_REG_V0); + tcg_debug_assert((offset & 7) == 0); + if (offset & 8) { + tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8); + arg = TCG_VEC_TMP1; + } + tcg_out_mem_long(s, 0, STVEWX, arg, base, offset); + tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4); + break; + case TCG_TYPE_V128: + tcg_debug_assert(arg >= TCG_REG_V0); + tcg_out_mem_long(s, 0, STVX, arg, base, offset); + break; + default: + g_assert_not_reached(); } - tcg_out_mem_long(s, opi, opx, arg, arg1, 
arg2); } static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, @@ -2626,32 +2810,236 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) { - g_assert_not_reached(); + switch (opc) { + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_andc_vec: + case INDEX_op_not_vec: + return 1; + case INDEX_op_cmp_vec: + return vece <= MO_32 ? -1 : 0; + default: + return 0; + } } static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg src) { - g_assert_not_reached(); + tcg_debug_assert(dst >= TCG_REG_V0); + tcg_debug_assert(src >= TCG_REG_V0); + + /* + * Recall we use (or emulate) VSX integer loads, so the integer is + * right justified within the left (zero-index) double-word. + */ + switch (vece) { + case MO_8: + tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16)); + break; + case MO_16: + tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16)); + break; + case MO_32: + tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16)); + break; + case MO_64: + tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8); + tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8); + break; + default: + g_assert_not_reached(); + } + return true; } static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg out, TCGReg base, intptr_t offset) { - g_assert_not_reached(); + int elt; + + tcg_debug_assert(out >= TCG_REG_V0); + switch (vece) { + case MO_8: + tcg_out_mem_long(s, 0, LVEBX, out, base, offset); + elt = extract32(offset, 0, 4); +#ifndef HOST_WORDS_BIGENDIAN + elt ^= 15; +#endif + tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16)); + break; + case MO_16: + tcg_debug_assert((offset & 1) == 0); + tcg_out_mem_long(s, 0, LVEHX, out, base, offset); + elt = extract32(offset, 1, 3); +#ifndef HOST_WORDS_BIGENDIAN + elt ^= 7; +#endif + tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16)); + break; + case MO_32: + tcg_debug_assert((offset & 3) == 0); + tcg_out_mem_long(s, 0, LVEWX, out, base, offset); + elt = extract32(offset, 2, 2); +#ifndef HOST_WORDS_BIGENDIAN + elt ^= 3; +#endif + tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16)); + break; + case MO_64: + tcg_debug_assert((offset & 7) == 0); + tcg_out_mem_long(s, 0, LVX, out, base, offset & -16); + tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8); + elt = extract32(offset, 3, 1); +#ifndef HOST_WORDS_BIGENDIAN + elt = !elt; +#endif + if (elt) { + tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8); + } else { + tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8); + } + break; + default: + g_assert_not_reached(); + } + return true; } static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, unsigned vecl, unsigned vece, const TCGArg *args, const int *const_args) { - g_assert_not_reached(); + static const uint32_t + eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, 0 }, + gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, 0 }, + gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, 0 }; + + TCGType type = vecl + TCG_TYPE_V64; + TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; + uint32_t insn; + + switch (opc) { + case INDEX_op_ld_vec: + tcg_out_ld(s, type, a0, a1, a2); + return; + case INDEX_op_st_vec: + tcg_out_st(s, type, a0, a1, a2); + return; + case INDEX_op_dupm_vec: + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); + return; + + case INDEX_op_and_vec: + insn = VAND; + break; + case INDEX_op_or_vec: + insn = VOR; + break; + case INDEX_op_xor_vec: + insn = VXOR; + break; + case INDEX_op_andc_vec: + insn = VANDC; + break; 
+ case INDEX_op_not_vec: + insn = VNOR; + a2 = a1; + break; + + case INDEX_op_cmp_vec: + switch (args[3]) { + case TCG_COND_EQ: + insn = eq_op[vece]; + break; + case TCG_COND_GT: + insn = gts_op[vece]; + break; + case TCG_COND_GTU: + insn = gtu_op[vece]; + break; + default: + g_assert_not_reached(); + } + break; + + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ + case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ + default: + g_assert_not_reached(); + } + + tcg_debug_assert(insn != 0); + tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2)); +} + +static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGCond cond) +{ + bool need_swap = false, need_inv = false; + + tcg_debug_assert(vece <= MO_32); + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_GT: + case TCG_COND_GTU: + break; + case TCG_COND_NE: + case TCG_COND_LE: + case TCG_COND_LEU: + need_inv = true; + break; + case TCG_COND_LT: + case TCG_COND_LTU: + need_swap = true; + break; + case TCG_COND_GE: + case TCG_COND_GEU: + need_swap = need_inv = true; + break; + default: + g_assert_not_reached(); + } + + if (need_inv) { + cond = tcg_invert_cond(cond); + } + if (need_swap) { + TCGv_vec t1; + t1 = v1, v1 = v2, v2 = t1; + cond = tcg_swap_cond(cond); + } + + vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0), + tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond); + + if (need_inv) { + tcg_gen_not_vec(vece, v0, v0); + } } void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, TCGArg a0, ...) { - g_assert_not_reached(); + va_list va; + TCGv_vec v0, v1, v2; + + va_start(va, a0); + v0 = temp_tcgv_vec(arg_temp(a0)); + v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + + switch (opc) { + case INDEX_op_cmp_vec: + expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); + break; + default: + g_assert_not_reached(); + } + va_end(va); } static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) @@ -2691,6 +3079,9 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) = { .args_ct_str = { "r", "r", "r", "r", "rI", "rZM" } }; static const TCGTargetOpDef sub2 = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } }; + static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } }; + static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } }; + static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } }; switch (op) { case INDEX_op_goto_ptr: @@ -2826,6 +3217,21 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) return (TCG_TARGET_REG_BITS == 64 ? &S_S : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S); + case INDEX_op_and_vec: + case INDEX_op_or_vec: + case INDEX_op_xor_vec: + case INDEX_op_andc_vec: + case INDEX_op_orc_vec: + case INDEX_op_cmp_vec: + return &v_v_v; + case INDEX_op_not_vec: + case INDEX_op_dup_vec: + return &v_v; + case INDEX_op_ld_vec: + case INDEX_op_st_vec: + case INDEX_op_dupm_vec: + return &v_r; + default: return NULL; } -- cgit v1.1 From e2382972829f93104cece078e7ed4d6323189288 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:39 +0200 Subject: tcg/ppc: Add support for vector maximum/minimum Add support for vector maximum/minimum using Altivec instructions VMAXSB, VMAXSH, VMAXSW, VMAXUB, VMAXUH, VMAXUW, and VMINSB, VMINSH, VMINSW, VMINUB, VMINUH, VMINUW. 
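For reference, the per-element semantics being mapped here, as a portable C model of the byte variant (an illustration, not code from the patch):

    #include <stdint.h>

    /* VMINSB: lane-wise signed minimum over sixteen 8-bit elements;
       VMAXSB and the unsigned forms differ only in comparison and type. */
    static void vminsb_model(int8_t out[16],
                             const int8_t a[16], const int8_t b[16])
    {
        for (int i = 0; i < 16; i++) {
            out[i] = a[i] < b[i] ? a[i] : b[i];
        }
    }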
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.h | 2 +- tcg/ppc/tcg-target.inc.c | 40 +++++++++++++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index a0e59a5..13699f1 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -164,7 +164,7 @@ extern bool have_altivec; #define TCG_TARGET_HAS_cmp_vec 1 #define TCG_TARGET_HAS_mul_vec 0 #define TCG_TARGET_HAS_sat_vec 0 -#define TCG_TARGET_HAS_minmax_vec 0 +#define TCG_TARGET_HAS_minmax_vec 1 #define TCG_TARGET_HAS_bitsel_vec 0 #define TCG_TARGET_HAS_cmpsel_vec 0 diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 1a8d7dc..6879be6 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -471,6 +471,19 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define STVX XO31(231) #define STVEWX XO31(199) +#define VMAXSB VX4(258) +#define VMAXSH VX4(322) +#define VMAXSW VX4(386) +#define VMAXUB VX4(2) +#define VMAXUH VX4(66) +#define VMAXUW VX4(130) +#define VMINSB VX4(770) +#define VMINSH VX4(834) +#define VMINSW VX4(898) +#define VMINUB VX4(514) +#define VMINUH VX4(578) +#define VMINUW VX4(642) + #define VCMPEQUB VX4(6) #define VCMPEQUH VX4(70) #define VCMPEQUW VX4(134) @@ -2817,6 +2830,11 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_andc_vec: case INDEX_op_not_vec: return 1; + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + return vece <= MO_32; case INDEX_op_cmp_vec: return vece <= MO_32 ? -1 : 0; default: @@ -2914,7 +2932,11 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, static const uint32_t eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, 0 }, gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, 0 }, - gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, 0 }; + gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, 0 }, + umin_op[4] = { VMINUB, VMINUH, VMINUW, 0 }, + smin_op[4] = { VMINSB, VMINSH, VMINSW, 0 }, + umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, 0 }, + smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, 0 }; TCGType type = vecl + TCG_TYPE_V64; TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; @@ -2931,6 +2953,18 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, tcg_out_dupm_vec(s, type, vece, a0, a1, a2); return; + case INDEX_op_smin_vec: + insn = smin_op[vece]; + break; + case INDEX_op_umin_vec: + insn = umin_op[vece]; + break; + case INDEX_op_smax_vec: + insn = smax_op[vece]; + break; + case INDEX_op_umax_vec: + insn = umax_op[vece]; + break; case INDEX_op_and_vec: insn = VAND; break; @@ -3223,6 +3257,10 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_andc_vec: case INDEX_op_orc_vec: case INDEX_op_cmp_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: return &v_v_v; case INDEX_op_not_vec: case INDEX_op_dup_vec: -- cgit v1.1 From d67508117db474971dfd3fe29714ec4d15cf2393 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:40 +0200 Subject: tcg/ppc: Add support for vector add/subtract Add support for vector add/subtract using Altivec instructions: VADDUBM, VADDUHM, VADDUWM, VSUBUBM, VSUBUHM, VSUBUWM. 
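These are the modulo (wrapping) forms; because two's-complement wrapping addition is sign-agnostic, the same unsigned-modulo instructions implement add_vec/sub_vec for signed elements as well. A C model of VADDUBM for illustration (not code from the patch):

    #include <stdint.h>

    static void vaddubm_model(uint8_t out[16],
                              const uint8_t a[16], const uint8_t b[16])
    {
        for (int i = 0; i < 16; i++) {
            out[i] = (uint8_t)(a[i] + b[i]);   /* wraps modulo 256 */
        }
    }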
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.inc.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 6879be6..6cfc78b 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -471,6 +471,14 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define STVX XO31(231) #define STVEWX XO31(199) +#define VADDUBM VX4(0) +#define VADDUHM VX4(64) +#define VADDUWM VX4(128) + +#define VSUBUBM VX4(1024) +#define VSUBUHM VX4(1088) +#define VSUBUWM VX4(1152) + #define VMAXSB VX4(258) #define VMAXSH VX4(322) #define VMAXSW VX4(386) @@ -2830,6 +2838,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_andc_vec: case INDEX_op_not_vec: return 1; + case INDEX_op_add_vec: + case INDEX_op_sub_vec: case INDEX_op_smax_vec: case INDEX_op_smin_vec: case INDEX_op_umax_vec: @@ -2930,6 +2940,8 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { static const uint32_t + add_op[4] = { VADDUBM, VADDUHM, VADDUWM, 0 }, + sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, 0 }, eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, 0 }, gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, 0 }, gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, 0 }, @@ -2953,6 +2965,12 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, tcg_out_dupm_vec(s, type, vece, a0, a1, a2); return; + case INDEX_op_add_vec: + insn = add_op[vece]; + break; + case INDEX_op_sub_vec: + insn = sub_op[vece]; + break; case INDEX_op_smin_vec: insn = smin_op[vece]; break; @@ -3251,6 +3269,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) return (TCG_TARGET_REG_BITS == 64 ? &S_S : TARGET_LONG_BITS == 32 ? &S_S_S : &S_S_S_S); + case INDEX_op_add_vec: + case INDEX_op_sub_vec: case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: -- cgit v1.1 From e9d1a53ae6d3a276f0d50090f80d923b9ee74631 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:41 +0200 Subject: tcg/ppc: Add support for vector saturated add/subtract Add support for vector saturated add/subtract using Altivec instructions: VADDSBS, VADDSHS, VADDSWS, VADDUBS, VADDUHS, VADDUWS, and VSUBSBS, VSUBSHS, VSUBSWS, VSUBUBS, VSUBUHS, VSUBUWS. 
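Unlike the modulo forms above, these clamp results to the representable range rather than wrapping. A scalar C model of a single VADDSBS lane, for illustration only:

    #include <stdint.h>

    static int8_t vaddsbs_lane(int8_t a, int8_t b)
    {
        int sum = a + b;         /* widen so the raw sum cannot overflow */
        if (sum > INT8_MAX) {
            return INT8_MAX;     /* saturate toward +127 */
        }
        if (sum < INT8_MIN) {
            return INT8_MIN;     /* saturate toward -128 */
        }
        return (int8_t)sum;
    }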
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.h | 2 +- tcg/ppc/tcg-target.inc.c | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 13699f1..3ebbbfa 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -163,7 +163,7 @@ extern bool have_altivec; #define TCG_TARGET_HAS_shv_vec 0 #define TCG_TARGET_HAS_cmp_vec 1 #define TCG_TARGET_HAS_mul_vec 0 -#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_minmax_vec 1 #define TCG_TARGET_HAS_bitsel_vec 0 #define TCG_TARGET_HAS_cmpsel_vec 0 diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 6cfc78b..a116520 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -471,12 +471,24 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define STVX XO31(231) #define STVEWX XO31(199) +#define VADDSBS VX4(768) +#define VADDUBS VX4(512) #define VADDUBM VX4(0) +#define VADDSHS VX4(832) +#define VADDUHS VX4(576) #define VADDUHM VX4(64) +#define VADDSWS VX4(896) +#define VADDUWS VX4(640) #define VADDUWM VX4(128) +#define VSUBSBS VX4(1792) +#define VSUBUBS VX4(1536) #define VSUBUBM VX4(1024) +#define VSUBSHS VX4(1856) +#define VSUBUHS VX4(1600) #define VSUBUHM VX4(1088) +#define VSUBSWS VX4(1920) +#define VSUBUWS VX4(1664) #define VSUBUWM VX4(1152) #define VMAXSB VX4(258) @@ -2844,6 +2856,10 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_smin_vec: case INDEX_op_umax_vec: case INDEX_op_umin_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: return vece <= MO_32; case INDEX_op_cmp_vec: return vece <= MO_32 ? -1 : 0; @@ -2945,6 +2961,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, 0 }, gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, 0 }, gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, 0 }, + ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 }, + usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 }, + sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 }, + ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 }, umin_op[4] = { VMINUB, VMINUH, VMINUW, 0 }, smin_op[4] = { VMINSB, VMINSH, VMINSW, 0 }, umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, 0 }, @@ -2971,6 +2991,18 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case INDEX_op_sub_vec: insn = sub_op[vece]; break; + case INDEX_op_ssadd_vec: + insn = ssadd_op[vece]; + break; + case INDEX_op_sssub_vec: + insn = sssub_op[vece]; + break; + case INDEX_op_usadd_vec: + insn = usadd_op[vece]; + break; + case INDEX_op_ussub_vec: + insn = ussub_op[vece]; + break; case INDEX_op_smin_vec: insn = smin_op[vece]; break; @@ -3277,6 +3309,10 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_andc_vec: case INDEX_op_orc_vec: case INDEX_op_cmp_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: case INDEX_op_smax_vec: case INDEX_op_smin_vec: case INDEX_op_umax_vec: -- cgit v1.1 From dabae0971b64da3e44451e9a2a975c5bc260cf2a Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:44 +0200 Subject: tcg/ppc: Support vector shift by immediate For Altivec, this is done via vector shift by vector, and loading the immediate into a register. 
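This works because the Altivec shift-by-vector instructions consult only the low log2(element width) bits of each element of the count operand, so splatting the masked immediate into every byte of a temporary register yields a valid per-element count. A C model of VSLW showing the masking (an illustration, not code from the patch):

    #include <stdint.h>

    static void vslw_model(uint32_t out[4],
                           const uint32_t a[4], const uint32_t count[4])
    {
        for (int i = 0; i < 4; i++) {
            out[i] = a[i] << (count[i] & 31);   /* only low 5 bits used */
        }
    }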
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.h | 2 +- tcg/ppc/tcg-target.inc.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 57 insertions(+), 3 deletions(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 3ebbbfa..ffb2269 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -160,7 +160,7 @@ extern bool have_altivec; #define TCG_TARGET_HAS_abs_vec 0 #define TCG_TARGET_HAS_shi_vec 0 #define TCG_TARGET_HAS_shs_vec 0 -#define TCG_TARGET_HAS_shv_vec 0 +#define TCG_TARGET_HAS_shv_vec 1 #define TCG_TARGET_HAS_cmp_vec 1 #define TCG_TARGET_HAS_mul_vec 0 #define TCG_TARGET_HAS_sat_vec 1 diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index a116520..a9264ec 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -514,6 +514,16 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define VCMPGTUH VX4(582) #define VCMPGTUW VX4(646) +#define VSLB VX4(260) +#define VSLH VX4(324) +#define VSLW VX4(388) +#define VSRB VX4(516) +#define VSRH VX4(580) +#define VSRW VX4(644) +#define VSRAB VX4(772) +#define VSRAH VX4(836) +#define VSRAW VX4(900) + #define VAND VX4(1028) #define VANDC VX4(1092) #define VNOR VX4(1284) @@ -2860,8 +2870,14 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_sssub_vec: case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: return vece <= MO_32; case INDEX_op_cmp_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_sari_vec: return vece <= MO_32 ? -1 : 0; default: return 0; @@ -2968,7 +2984,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, umin_op[4] = { VMINUB, VMINUH, VMINUW, 0 }, smin_op[4] = { VMINSB, VMINSH, VMINSW, 0 }, umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, 0 }, - smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, 0 }; + smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, 0 }, + shlv_op[4] = { VSLB, VSLH, VSLW, 0 }, + shrv_op[4] = { VSRB, VSRH, VSRW, 0 }, + sarv_op[4] = { VSRAB, VSRAH, VSRAW, 0 }; TCGType type = vecl + TCG_TYPE_V64; TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; @@ -3015,6 +3034,15 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case INDEX_op_umax_vec: insn = umax_op[vece]; break; + case INDEX_op_shlv_vec: + insn = shlv_op[vece]; + break; + case INDEX_op_shrv_vec: + insn = shrv_op[vece]; + break; + case INDEX_op_sarv_vec: + insn = sarv_op[vece]; + break; case INDEX_op_and_vec: insn = VAND; break; @@ -3059,6 +3087,18 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2)); } +static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGArg imm, TCGOpcode opci) +{ + TCGv_vec t1 = tcg_temp_new_vec(type); + + /* Splat w/bytes for xxspltib. 
*/ + tcg_gen_dupi_vec(MO_8, t1, imm & ((8 << vece) - 1)); + vec_gen_3(opci, type, vece, tcgv_vec_arg(v0), + tcgv_vec_arg(v1), tcgv_vec_arg(t1)); + tcg_temp_free_vec(t1); +} + static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGv_vec v2, TCGCond cond) { @@ -3110,14 +3150,25 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, { va_list va; TCGv_vec v0, v1, v2; + TCGArg a2; va_start(va, a0); v0 = temp_tcgv_vec(arg_temp(a0)); v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); - v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + a2 = va_arg(va, TCGArg); switch (opc) { + case INDEX_op_shli_vec: + expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec); + break; + case INDEX_op_shri_vec: + expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec); + break; + case INDEX_op_sari_vec: + expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec); + break; case INDEX_op_cmp_vec: + v2 = temp_tcgv_vec(arg_temp(a2)); expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); break; default: @@ -3317,6 +3368,9 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_smin_vec: case INDEX_op_umax_vec: case INDEX_op_umin_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: return &v_v_v; case INDEX_op_not_vec: case INDEX_op_dup_vec: -- cgit v1.1 From d9897efa1fd3174eca5268ba017aa39242e5ddc5 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:42 +0200 Subject: tcg/ppc: Support vector multiply For Altivec, this is always an expansion. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.h | 2 +- tcg/ppc/tcg-target.inc.c | 113 ++++++++++++++++++++++++++++++++++++++++++++++- tcg/ppc/tcg-target.opc.h | 8 ++++ 3 files changed, 121 insertions(+), 2 deletions(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index ffb2269..f50b7f4 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -162,7 +162,7 @@ extern bool have_altivec; #define TCG_TARGET_HAS_shs_vec 0 #define TCG_TARGET_HAS_shv_vec 1 #define TCG_TARGET_HAS_cmp_vec 1 -#define TCG_TARGET_HAS_mul_vec 0 +#define TCG_TARGET_HAS_mul_vec 1 #define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_minmax_vec 1 #define TCG_TARGET_HAS_bitsel_vec 0 diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index a9264ec..d4b3354 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -523,6 +523,25 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define VSRAB VX4(772) #define VSRAH VX4(836) #define VSRAW VX4(900) +#define VRLB VX4(4) +#define VRLH VX4(68) +#define VRLW VX4(132) + +#define VMULEUB VX4(520) +#define VMULEUH VX4(584) +#define VMULOUB VX4(8) +#define VMULOUH VX4(72) +#define VMSUMUHM VX4(38) + +#define VMRGHB VX4(12) +#define VMRGHH VX4(76) +#define VMRGHW VX4(140) +#define VMRGLB VX4(268) +#define VMRGLH VX4(332) +#define VMRGLW VX4(396) + +#define VPKUHUM VX4(14) +#define VPKUWUM VX4(78) #define VAND VX4(1028) #define VANDC VX4(1092) @@ -2875,6 +2894,7 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_sarv_vec: return vece <= MO_32; case INDEX_op_cmp_vec: + case INDEX_op_mul_vec: case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: @@ -2987,7 +3007,13 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, 0 }, shlv_op[4] = { VSLB, VSLH, VSLW, 0 }, shrv_op[4] = { VSRB, 
VSRH, VSRW, 0 }, - sarv_op[4] = { VSRAB, VSRAH, VSRAW, 0 }; + sarv_op[4] = { VSRAB, VSRAH, VSRAW, 0 }, + mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 }, + mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 }, + muleu_op[4] = { VMULEUB, VMULEUH, 0, 0 }, + mulou_op[4] = { VMULOUB, VMULOUH, 0, 0 }, + pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 }, + rotl_op[4] = { VRLB, VRLH, VRLW, 0 }; TCGType type = vecl + TCG_TYPE_V64; TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; @@ -3076,6 +3102,29 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, } break; + case INDEX_op_ppc_mrgh_vec: + insn = mrgh_op[vece]; + break; + case INDEX_op_ppc_mrgl_vec: + insn = mrgl_op[vece]; + break; + case INDEX_op_ppc_muleu_vec: + insn = muleu_op[vece]; + break; + case INDEX_op_ppc_mulou_vec: + insn = mulou_op[vece]; + break; + case INDEX_op_ppc_pkum_vec: + insn = pkum_op[vece]; + break; + case INDEX_op_ppc_rotl_vec: + insn = rotl_op[vece]; + break; + case INDEX_op_ppc_msum_vec: + tcg_debug_assert(vece == MO_16); + tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3])); + return; + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ case INDEX_op_dupi_vec: /* Always emitted via tcg_out_movi. */ case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ @@ -3145,6 +3194,53 @@ static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, } } +static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2) +{ + TCGv_vec t1 = tcg_temp_new_vec(type); + TCGv_vec t2 = tcg_temp_new_vec(type); + TCGv_vec t3, t4; + + switch (vece) { + case MO_8: + case MO_16: + vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1), + tcgv_vec_arg(v1), tcgv_vec_arg(v2)); + vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2), + tcgv_vec_arg(v1), tcgv_vec_arg(v2)); + vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0), + tcgv_vec_arg(t1), tcgv_vec_arg(t2)); + vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1), + tcgv_vec_arg(t1), tcgv_vec_arg(t2)); + vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0), + tcgv_vec_arg(v0), tcgv_vec_arg(t1)); + break; + + case MO_32: + t3 = tcg_temp_new_vec(type); + t4 = tcg_temp_new_vec(type); + tcg_gen_dupi_vec(MO_8, t4, -16); + vec_gen_3(INDEX_op_ppc_rotl_vec, type, MO_32, tcgv_vec_arg(t1), + tcgv_vec_arg(v2), tcgv_vec_arg(t4)); + vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2), + tcgv_vec_arg(v1), tcgv_vec_arg(v2)); + tcg_gen_dupi_vec(MO_8, t3, 0); + vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t3), + tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(t3)); + vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t3), + tcgv_vec_arg(t3), tcgv_vec_arg(t4)); + tcg_gen_add_vec(MO_32, v0, t2, t3); + tcg_temp_free_vec(t3); + tcg_temp_free_vec(t4); + break; + + default: + g_assert_not_reached(); + } + tcg_temp_free_vec(t1); + tcg_temp_free_vec(t2); +} + void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, TCGArg a0, ...) 
{ @@ -3171,6 +3267,10 @@ void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, v2 = temp_tcgv_vec(arg_temp(a2)); expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); break; + case INDEX_op_mul_vec: + v2 = temp_tcgv_vec(arg_temp(a2)); + expand_vec_mul(type, vece, v0, v1, v2); + break; default: g_assert_not_reached(); } @@ -3217,6 +3317,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } }; static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } }; static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } }; + static const TCGTargetOpDef v_v_v_v + = { .args_ct_str = { "v", "v", "v", "v" } }; switch (op) { case INDEX_op_goto_ptr: @@ -3354,6 +3456,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_add_vec: case INDEX_op_sub_vec: + case INDEX_op_mul_vec: case INDEX_op_and_vec: case INDEX_op_or_vec: case INDEX_op_xor_vec: @@ -3371,6 +3474,12 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_shlv_vec: case INDEX_op_shrv_vec: case INDEX_op_sarv_vec: + case INDEX_op_ppc_mrgh_vec: + case INDEX_op_ppc_mrgl_vec: + case INDEX_op_ppc_muleu_vec: + case INDEX_op_ppc_mulou_vec: + case INDEX_op_ppc_pkum_vec: + case INDEX_op_ppc_rotl_vec: return &v_v_v; case INDEX_op_not_vec: case INDEX_op_dup_vec: @@ -3379,6 +3488,8 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_st_vec: case INDEX_op_dupm_vec: return &v_r; + case INDEX_op_ppc_msum_vec: + return &v_v_v_v; default: return NULL; diff --git a/tcg/ppc/tcg-target.opc.h b/tcg/ppc/tcg-target.opc.h index fa680dd..db24a11 100644 --- a/tcg/ppc/tcg-target.opc.h +++ b/tcg/ppc/tcg-target.opc.h @@ -3,3 +3,11 @@ * emitted by tcg_expand_vec_op. For those familiar with GCC internals, * consider these to be UNSPEC with names. */ + +DEF(ppc_mrgh_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_mrgl_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_msum_vec, 1, 3, 0, IMPLVEC) +DEF(ppc_muleu_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_mulou_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_pkum_vec, 1, 2, 0, IMPLVEC) +DEF(ppc_rotl_vec, 1, 2, 0, IMPLVEC) -- cgit v1.1 From 597cf978926ca3a745482a11096de8d433c6be1c Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:46 +0200 Subject: tcg/ppc: Support vector dup2 This is only used for 32-bit hosts. 
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.inc.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index d4b3354..8a50813 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -3102,6 +3102,14 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, } break; + case INDEX_op_dup2_vec: + assert(TCG_TARGET_REG_BITS == 32); + /* With inputs a1 = xLxx, a2 = xHxx */ + tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */ + tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */ + tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */ + return; + case INDEX_op_ppc_mrgh_vec: insn = mrgh_op[vece]; break; @@ -3480,6 +3488,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_ppc_mulou_vec: case INDEX_op_ppc_pkum_vec: case INDEX_op_ppc_rotl_vec: + case INDEX_op_dup2_vec: return &v_v_v; case INDEX_op_not_vec: case INDEX_op_dup_vec: -- cgit v1.1 From 68f340d4cd9f0423039e4706a6602673d7ca9101 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sat, 29 Jun 2019 10:19:04 +0000 Subject: tcg/ppc: Enable Altivec detection Now that we have implemented the required tcg operations, we can enable detection of host vector support. Tested-by: Mark Cave-Ayland (PPC32) Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 8a50813..d739f4b 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -3528,6 +3528,10 @@ static void tcg_target_init(TCGContext *s) have_isel = have_isa_2_06; #endif + if (hwcap & PPC_FEATURE_HAS_ALTIVEC) { + have_altivec = true; + } + tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; if (have_altivec) { -- cgit v1.1 From 47c906ae6f54fa10b3f072863d8993e790a14439 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Sun, 23 Jun 2019 19:04:47 +0200 Subject: tcg/ppc: Update vector support for VSX The VSX instruction set instructions include double-word loads and stores, double-word load and splat, double-word permute, and bit select. All of which require multiple operations in the Altivec instruction set. Because the VSX registers map %vsr32 to %vr0, and we have no current intention or need to use vector registers outside %vr0-%vr19, force on the {ax,bx,cx,tx} bits within the added VSX insns so that we don't have to otherwise modify the VR[TABC] macros. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson Signed-off-by: Aleksandar Markovic --- tcg/ppc/tcg-target.h | 5 +++-- tcg/ppc/tcg-target.inc.c | 52 ++++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 51 insertions(+), 6 deletions(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index f50b7f4..c974ca2 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -66,6 +66,7 @@ typedef enum { extern TCGPowerISA have_isa; extern bool have_altivec; +extern bool have_vsx; #define have_isa_2_06 (have_isa >= tcg_isa_2_06) #define have_isa_3_00 (have_isa >= tcg_isa_3_00) @@ -149,7 +150,7 @@ extern bool have_altivec; * instruction and substituting two 32-bit stores makes the generated * code quite large. 
*/ -#define TCG_TARGET_HAS_v64 0 +#define TCG_TARGET_HAS_v64 have_vsx #define TCG_TARGET_HAS_v128 have_altivec #define TCG_TARGET_HAS_v256 0 @@ -165,7 +166,7 @@ extern bool have_altivec; #define TCG_TARGET_HAS_mul_vec 1 #define TCG_TARGET_HAS_sat_vec 1 #define TCG_TARGET_HAS_minmax_vec 1 -#define TCG_TARGET_HAS_bitsel_vec 0 +#define TCG_TARGET_HAS_bitsel_vec have_vsx #define TCG_TARGET_HAS_cmpsel_vec 0 void flush_icache_range(uintptr_t start, uintptr_t stop); diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index d739f4b..2388958 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -67,6 +67,7 @@ static tcg_insn_unit *tb_ret_addr; TCGPowerISA have_isa; static bool have_isel; bool have_altivec; +bool have_vsx; #ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG 30 @@ -467,9 +468,12 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define LVEBX XO31(7) #define LVEHX XO31(39) #define LVEWX XO31(71) +#define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */ +#define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */ #define STVX XO31(231) #define STVEWX XO31(199) +#define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */ #define VADDSBS VX4(768) #define VADDUBS VX4(512) @@ -558,6 +562,9 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define VSLDOI VX4(44) +#define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */ +#define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */ + #define RT(r) ((r)<<21) #define RS(r) ((r)<<21) #define RA(r) ((r)<<16) @@ -884,11 +891,21 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, add = 0; } - load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1); - if (TCG_TARGET_REG_BITS == 64) { - new_pool_l2(s, rel, s->code_ptr, add, val, val); + if (have_vsx) { + load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX; + load_insn |= VRT(ret) | RB(TCG_REG_TMP1); + if (TCG_TARGET_REG_BITS == 64) { + new_pool_label(s, val, rel, s->code_ptr, add); + } else { + new_pool_l2(s, rel, s->code_ptr, add, val, val); + } } else { - new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val); + load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1); + if (TCG_TARGET_REG_BITS == 64) { + new_pool_l2(s, rel, s->code_ptr, add, val, val); + } else { + new_pool_l4(s, rel, s->code_ptr, add, val, val, val, val); + } } if (USE_REG_TB) { @@ -1136,6 +1153,10 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, /* fallthru */ case TCG_TYPE_V64: tcg_debug_assert(ret >= TCG_REG_V0); + if (have_vsx) { + tcg_out_mem_long(s, 0, LXSDX, ret, base, offset); + break; + } tcg_debug_assert((offset & 7) == 0); tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16); if (offset & 8) { @@ -1180,6 +1201,10 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, /* fallthru */ case TCG_TYPE_V64: tcg_debug_assert(arg >= TCG_REG_V0); + if (have_vsx) { + tcg_out_mem_long(s, 0, STXSDX, arg, base, offset); + break; + } tcg_debug_assert((offset & 7) == 0); if (offset & 8) { tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8); @@ -2899,6 +2924,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_shri_vec: case INDEX_op_sari_vec: return vece <= MO_32 ? 
-1 : 0; + case INDEX_op_bitsel_vec: + return have_vsx; default: return 0; } @@ -2925,6 +2952,10 @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16)); break; case MO_64: + if (have_vsx) { + tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src)); + break; + } tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8); tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8); break; @@ -2968,6 +2999,10 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16)); break; case MO_64: + if (have_vsx) { + tcg_out_mem_long(s, 0, LXVDSX, out, base, offset); + break; + } tcg_debug_assert((offset & 7) == 0); tcg_out_mem_long(s, 0, LVX, out, base, offset & -16); tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8); @@ -3102,6 +3137,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, } break; + case INDEX_op_bitsel_vec: + tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3])); + return; + case INDEX_op_dup2_vec: assert(TCG_TARGET_REG_BITS == 32); /* With inputs a1 = xLxx, a2 = xHxx */ @@ -3497,6 +3536,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_st_vec: case INDEX_op_dupm_vec: return &v_r; + case INDEX_op_bitsel_vec: case INDEX_op_ppc_msum_vec: return &v_v_v_v; @@ -3530,6 +3570,10 @@ static void tcg_target_init(TCGContext *s) if (hwcap & PPC_FEATURE_HAS_ALTIVEC) { have_altivec = true; + /* We only care about the portion of VSX that overlaps Altivec. */ + if (hwcap & PPC_FEATURE_HAS_VSX) { + have_vsx = true; + } } tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; -- cgit v1.1 From 64ff1c6d21745f2be89c76604391ae454f8490d0 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 03:39:24 +0000 Subject: tcg/ppc: Update vector support for v2.07 Altivec These new instructions are conditional only on MSR.VEC and are thus part of the Altivec instruction set, and not VSX. This includes lots of double-word arithmetic and a few extra logical operations. 
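Concretely, each vector op picks its encoding from a small per-element-size table, and the new MO_64 slots are only usable once ISA v2.07 is detected. A stand-alone sketch of that selection for add (encodings as in the patch; the helper name is invented):

    #include <stdbool.h>
    #include <stdint.h>

    enum { MO_8, MO_16, MO_32, MO_64 };

    #define OPCD(opc)  ((uint32_t)(opc) << 26)
    #define VX4(opc)   (OPCD(4) | (opc))

    #define VADDUBM  VX4(0)
    #define VADDUHM  VX4(64)
    #define VADDUWM  VX4(128)
    #define VADDUDM  VX4(192)            /* v2.07 */

    static const uint32_t add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM };

    /* Return the add encoding for vece, or 0 if the host cannot emit it. */
    static uint32_t pick_add_insn(unsigned vece, bool have_isa_2_07)
    {
        if (vece == MO_64 && !have_isa_2_07) {
            return 0;                    /* vaddudm is new in ISA v2.07 */
        }
        return add_op[vece];
    }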
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.h | 4 ++- tcg/ppc/tcg-target.inc.c | 85 ++++++++++++++++++++++++++++++++++++------------ 2 files changed, 67 insertions(+), 22 deletions(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index c974ca2..13197ed 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -61,6 +61,7 @@ typedef enum { typedef enum { tcg_isa_base, tcg_isa_2_06, + tcg_isa_2_07, tcg_isa_3_00, } TCGPowerISA; @@ -69,6 +70,7 @@ extern bool have_altivec; extern bool have_vsx; #define have_isa_2_06 (have_isa >= tcg_isa_2_06) +#define have_isa_2_07 (have_isa >= tcg_isa_2_07) #define have_isa_3_00 (have_isa >= tcg_isa_3_00) /* optional instructions automatically implemented */ @@ -155,7 +157,7 @@ extern bool have_vsx; #define TCG_TARGET_HAS_v256 0 #define TCG_TARGET_HAS_andc_vec 1 -#define TCG_TARGET_HAS_orc_vec 0 +#define TCG_TARGET_HAS_orc_vec have_isa_2_07 #define TCG_TARGET_HAS_not_vec 1 #define TCG_TARGET_HAS_neg_vec 0 #define TCG_TARGET_HAS_abs_vec 0 diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 2388958..bc3a669 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -484,6 +484,7 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define VADDSWS VX4(896) #define VADDUWS VX4(640) #define VADDUWM VX4(128) +#define VADDUDM VX4(192) /* v2.07 */ #define VSUBSBS VX4(1792) #define VSUBUBS VX4(1536) @@ -494,47 +495,62 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define VSUBSWS VX4(1920) #define VSUBUWS VX4(1664) #define VSUBUWM VX4(1152) +#define VSUBUDM VX4(1216) /* v2.07 */ #define VMAXSB VX4(258) #define VMAXSH VX4(322) #define VMAXSW VX4(386) +#define VMAXSD VX4(450) /* v2.07 */ #define VMAXUB VX4(2) #define VMAXUH VX4(66) #define VMAXUW VX4(130) +#define VMAXUD VX4(194) /* v2.07 */ #define VMINSB VX4(770) #define VMINSH VX4(834) #define VMINSW VX4(898) +#define VMINSD VX4(962) /* v2.07 */ #define VMINUB VX4(514) #define VMINUH VX4(578) #define VMINUW VX4(642) +#define VMINUD VX4(706) /* v2.07 */ #define VCMPEQUB VX4(6) #define VCMPEQUH VX4(70) #define VCMPEQUW VX4(134) +#define VCMPEQUD VX4(199) /* v2.07 */ #define VCMPGTSB VX4(774) #define VCMPGTSH VX4(838) #define VCMPGTSW VX4(902) +#define VCMPGTSD VX4(967) /* v2.07 */ #define VCMPGTUB VX4(518) #define VCMPGTUH VX4(582) #define VCMPGTUW VX4(646) +#define VCMPGTUD VX4(711) /* v2.07 */ #define VSLB VX4(260) #define VSLH VX4(324) #define VSLW VX4(388) +#define VSLD VX4(1476) /* v2.07 */ #define VSRB VX4(516) #define VSRH VX4(580) #define VSRW VX4(644) +#define VSRD VX4(1732) /* v2.07 */ #define VSRAB VX4(772) #define VSRAH VX4(836) #define VSRAW VX4(900) +#define VSRAD VX4(964) /* v2.07 */ #define VRLB VX4(4) #define VRLH VX4(68) #define VRLW VX4(132) +#define VRLD VX4(196) /* v2.07 */ #define VMULEUB VX4(520) #define VMULEUH VX4(584) +#define VMULEUW VX4(648) /* v2.07 */ #define VMULOUB VX4(8) #define VMULOUH VX4(72) +#define VMULOUW VX4(136) /* v2.07 */ +#define VMULUWM VX4(137) /* v2.07 */ #define VMSUMUHM VX4(38) #define VMRGHB VX4(12) @@ -552,6 +568,9 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define VNOR VX4(1284) #define VOR VX4(1156) #define VXOR VX4(1220) +#define VEQV VX4(1668) /* v2.07 */ +#define VNAND VX4(1412) /* v2.07 */ +#define VORC VX4(1348) /* v2.07 */ #define VSPLTB VX4(524) #define VSPLTH VX4(588) @@ -2904,26 +2923,37 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case 
INDEX_op_andc_vec: case INDEX_op_not_vec: return 1; + case INDEX_op_orc_vec: + return have_isa_2_07; case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_smax_vec: case INDEX_op_smin_vec: case INDEX_op_umax_vec: case INDEX_op_umin_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + return vece <= MO_32 || have_isa_2_07; case INDEX_op_ssadd_vec: case INDEX_op_sssub_vec: case INDEX_op_usadd_vec: case INDEX_op_ussub_vec: - case INDEX_op_shlv_vec: - case INDEX_op_shrv_vec: - case INDEX_op_sarv_vec: return vece <= MO_32; case INDEX_op_cmp_vec: - case INDEX_op_mul_vec: case INDEX_op_shli_vec: case INDEX_op_shri_vec: case INDEX_op_sari_vec: - return vece <= MO_32 ? -1 : 0; + return vece <= MO_32 || have_isa_2_07 ? -1 : 0; + case INDEX_op_mul_vec: + switch (vece) { + case MO_8: + case MO_16: + return -1; + case MO_32: + return have_isa_2_07 ? 1 : -1; + } + return 0; case INDEX_op_bitsel_vec: return have_vsx; default: @@ -3027,28 +3057,28 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, const int *const_args) { static const uint32_t - add_op[4] = { VADDUBM, VADDUHM, VADDUWM, 0 }, - sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, 0 }, - eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, 0 }, - gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, 0 }, - gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, 0 }, + add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM }, + sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM }, + eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD }, + gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD }, + gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD }, ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 }, usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 }, sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 }, ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 }, - umin_op[4] = { VMINUB, VMINUH, VMINUW, 0 }, - smin_op[4] = { VMINSB, VMINSH, VMINSW, 0 }, - umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, 0 }, - smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, 0 }, - shlv_op[4] = { VSLB, VSLH, VSLW, 0 }, - shrv_op[4] = { VSRB, VSRH, VSRW, 0 }, - sarv_op[4] = { VSRAB, VSRAH, VSRAW, 0 }, + umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD }, + smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD }, + umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD }, + smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD }, + shlv_op[4] = { VSLB, VSLH, VSLW, VSLD }, + shrv_op[4] = { VSRB, VSRH, VSRW, VSRD }, + sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD }, mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 }, mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 }, - muleu_op[4] = { VMULEUB, VMULEUH, 0, 0 }, - mulou_op[4] = { VMULOUB, VMULOUH, 0, 0 }, + muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 }, + mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 }, pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 }, - rotl_op[4] = { VRLB, VRLH, VRLW, 0 }; + rotl_op[4] = { VRLB, VRLH, VRLW, VRLD }; TCGType type = vecl + TCG_TYPE_V64; TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; @@ -3071,6 +3101,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case INDEX_op_sub_vec: insn = sub_op[vece]; break; + case INDEX_op_mul_vec: + tcg_debug_assert(vece == MO_32 && have_isa_2_07); + insn = VMULUWM; + break; case INDEX_op_ssadd_vec: insn = ssadd_op[vece]; break; @@ -3120,6 +3154,9 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, insn = VNOR; a2 = a1; break; + case INDEX_op_orc_vec: + insn = VORC; + break; case INDEX_op_cmp_vec: switch (args[3]) { @@ -3200,7 +3237,7 @@ static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, { bool 
need_swap = false, need_inv = false; - tcg_debug_assert(vece <= MO_32); + tcg_debug_assert(vece <= MO_32 || have_isa_2_07); switch (cond) { case TCG_COND_EQ: @@ -3264,6 +3301,7 @@ break; case MO_32: + tcg_debug_assert(!have_isa_2_07); t3 = tcg_temp_new_vec(type); t4 = tcg_temp_new_vec(type); tcg_gen_dupi_vec(MO_8, t4, -16); @@ -3554,6 +3592,11 @@ static void tcg_target_init(TCGContext *s) if (hwcap & PPC_FEATURE_ARCH_2_06) { have_isa = tcg_isa_2_06; } +#ifdef PPC_FEATURE2_ARCH_2_07 + if (hwcap2 & PPC_FEATURE2_ARCH_2_07) { + have_isa = tcg_isa_2_07; + } +#endif #ifdef PPC_FEATURE2_ARCH_3_00 if (hwcap2 & PPC_FEATURE2_ARCH_3_00) { have_isa = tcg_isa_3_00; -- cgit v1.1 From b2dda6400c1ef10b7918a7775997575b174062b3 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 03:50:41 +0000 Subject: tcg/ppc: Update vector support for v2.07 VSX These new instructions are conditional only on MSR.VSX and are thus part of the VSX instruction set, and not Altivec. This includes double-word loads and stores. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index bc3a669..6321e07 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -470,10 +470,12 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define LVEWX XO31(71) #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */ #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */ +#define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */ #define STVX XO31(231) #define STVEWX XO31(199) #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */ +#define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */ #define VADDSBS VX4(768) #define VADDUBS VX4(512) @@ -1156,6 +1158,10 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset); break; } + if (have_isa_2_07 && have_vsx) { + tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset); + break; + } tcg_debug_assert((offset & 3) == 0); tcg_out_mem_long(s, 0, LVEWX, ret, base, offset); shift = (offset - 4) & 0xc; @@ -1203,6 +1209,10 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, tcg_out_mem_long(s, STW, STWX, arg, base, offset); break; } + if (have_isa_2_07 && have_vsx) { + tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset); + break; + } tcg_debug_assert((offset & 3) == 0); shift = (offset - 4) & 0xc; if (shift) { -- cgit v1.1 From 7097312d37d3021cac9bb30a7f8c4660d2a25cd0 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 03:59:46 +0000 Subject: tcg/ppc: Update vector support for v2.07 FP These new instructions are conditional on MSR.FP when TX=0 and MSR.VEC when TX=1. Since we only care about the Altivec registers, and force TX=1, we can consider these to be Altivec instructions. Since Altivec is true for any use of vector types, we only need to test have_isa_2_07. This includes moves to and from the integer registers.
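The core of the change is direction selection in tcg_out_mov: mfvsrwz/mfvsrd move vector to integer, mtvsrwz/mtvsrd move integer to vector, and neither direction is possible before v2.07. A simplified stand-alone model of that decision (register numbering as in this backend; the helper and its out-parameter are invented for the sketch):

    #include <stdbool.h>

    #define TCG_REG_V0  32   /* GPRs are regs 0..31, Altivec regs 32..63 */

    static bool pick_cross_move(bool is_i32, unsigned ret, unsigned arg,
                                bool have_isa_2_07, const char **insn)
    {
        if (!have_isa_2_07) {
            return false;    /* pre-2.07: no direct GPR<->VR moves exist */
        }
        if (ret < TCG_REG_V0 && arg >= TCG_REG_V0) {   /* vector -> integer */
            *insn = is_i32 ? "mfvsrwz" : "mfvsrd";
            return true;
        }
        if (ret >= TCG_REG_V0 && arg < TCG_REG_V0) {   /* integer -> vector */
            *insn = is_i32 ? "mtvsrwz" : "mtvsrd";
            return true;
        }
        return false;  /* same-class moves use OR (gpr,gpr) or VOR (vr,vr) */
    }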
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 6321e07..840464a 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -586,6 +586,11 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */ #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */ +#define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */ +#define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */ +#define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */ +#define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */ + #define RT(r) ((r)<<21) #define RS(r) ((r)<<21) #define RA(r) ((r)<<16) @@ -715,12 +720,27 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) tcg_debug_assert(TCG_TARGET_REG_BITS == 64); /* fallthru */ case TCG_TYPE_I32: - if (ret < TCG_REG_V0 && arg < TCG_REG_V0) { - tcg_out32(s, OR | SAB(arg, ret, arg)); - break; - } else if (ret < TCG_REG_V0 || arg < TCG_REG_V0) { - /* Altivec does not support vector/integer moves. */ - return false; + if (ret < TCG_REG_V0) { + if (arg < TCG_REG_V0) { + tcg_out32(s, OR | SAB(arg, ret, arg)); + break; + } else if (have_isa_2_07) { + tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD) + | VRT(arg) | RA(ret)); + break; + } else { + /* Altivec does not support vector->integer moves. */ + return false; + } + } else if (arg < TCG_REG_V0) { + if (have_isa_2_07) { + tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD) + | VRT(ret) | RA(arg)); + break; + } else { + /* Altivec does not support integer->vector moves. */ + return false; + } } /* fallthru */ case TCG_TYPE_V64: -- cgit v1.1 From d7cd6a2f251c54c989fa35858beafe4a25c789af Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 04:21:22 +0000 Subject: tcg/ppc: Update vector support for v3.00 Altivec These new instructions are conditional only on MSR.VEC and are thus part of the Altivec instruction set, and not VSX. This includes negation and compare not equal. 
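An encoding detail worth noting: vnegw and vnegd share a single VX opcode and select the operation through the RA field, which is why the emitter below clears a1 and moves the source into the VRB slot. A stand-alone sketch of the packing (constants as in the patch; the helper is invented):

    #include <stdint.h>

    #define OPCD(opc) ((uint32_t)(opc) << 26)
    #define VX4(opc)  (OPCD(4) | (opc))
    #define VRT(r)    (((r) & 31) << 21)
    #define VRA(r)    (((r) & 31) << 16)
    #define VRB(r)    (((r) & 31) << 11)

    #define VNEGW  (VX4(1538) | (6 << 16))  /* v3.00: RA field 6 = negate words */
    #define VNEGD  (VX4(1538) | (7 << 16))  /* v3.00: RA field 7 = negate dwords */

    /* RA must stay 0 so the sub-opcode in bits 16..20 is preserved. */
    static uint32_t pack_vneg(uint32_t insn, unsigned dst, unsigned src)
    {
        return insn | VRT(dst) | VRA(0) | VRB(src);
    }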
Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.h | 2 +- tcg/ppc/tcg-target.inc.c | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/tcg/ppc/tcg-target.h b/tcg/ppc/tcg-target.h index 13197ed..4fa21f0 100644 --- a/tcg/ppc/tcg-target.h +++ b/tcg/ppc/tcg-target.h @@ -159,7 +159,7 @@ extern bool have_vsx; #define TCG_TARGET_HAS_andc_vec 1 #define TCG_TARGET_HAS_orc_vec have_isa_2_07 #define TCG_TARGET_HAS_not_vec 1 -#define TCG_TARGET_HAS_neg_vec 0 +#define TCG_TARGET_HAS_neg_vec have_isa_3_00 #define TCG_TARGET_HAS_abs_vec 0 #define TCG_TARGET_HAS_shi_vec 0 #define TCG_TARGET_HAS_shs_vec 0 diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 840464a..bd9259c 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -499,6 +499,9 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define VSUBUWM VX4(1152) #define VSUBUDM VX4(1216) /* v2.07 */ +#define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */ +#define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */ + #define VMAXSB VX4(258) #define VMAXSH VX4(322) #define VMAXSW VX4(386) @@ -528,6 +531,9 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define VCMPGTUH VX4(582) #define VCMPGTUW VX4(646) #define VCMPGTUD VX4(711) /* v2.07 */ +#define VCMPNEB VX4(7) /* v3.00 */ +#define VCMPNEH VX4(71) /* v3.00 */ +#define VCMPNEW VX4(135) /* v3.00 */ #define VSLB VX4(260) #define VSLH VX4(324) @@ -2976,6 +2982,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) case INDEX_op_shri_vec: case INDEX_op_sari_vec: return vece <= MO_32 || have_isa_2_07 ? -1 : 0; + case INDEX_op_neg_vec: + return vece >= MO_32 && have_isa_3_00; case INDEX_op_mul_vec: switch (vece) { case MO_8: @@ -3090,7 +3098,9 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, static const uint32_t add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM }, sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM }, + neg_op[4] = { 0, 0, VNEGW, VNEGD }, eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD }, + ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 }, gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD }, gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD }, ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 }, @@ -3132,6 +3142,11 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case INDEX_op_sub_vec: insn = sub_op[vece]; break; + case INDEX_op_neg_vec: + insn = neg_op[vece]; + a2 = a1; + a1 = 0; + break; case INDEX_op_mul_vec: tcg_debug_assert(vece == MO_32 && have_isa_2_07); insn = VMULUWM; @@ -3194,6 +3209,9 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, case TCG_COND_EQ: insn = eq_op[vece]; break; + case TCG_COND_NE: + insn = ne_op[vece]; + break; case TCG_COND_GT: insn = gts_op[vece]; break; @@ -3276,6 +3294,10 @@ static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, case TCG_COND_GTU: break; case TCG_COND_NE: + if (have_isa_3_00 && vece <= MO_32) { + break; + } + /* fall through */ case TCG_COND_LE: case TCG_COND_LEU: need_inv = true; @@ -3599,6 +3621,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) case INDEX_op_dup2_vec: return &v_v_v; case INDEX_op_not_vec: + case INDEX_op_neg_vec: case INDEX_op_dup_vec: return &v_v; case INDEX_op_ld_vec: -- cgit v1.1 From 6e11cde15074a9b218d89bfb9bbf8ac6f7a881c5 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 04:36:26 +0000 Subject: tcg/ppc: Update vector support for v3.00 load/store These 
new instructions are a mix of those like LXSD that are only conditional on MSR.VEC and those like LXV that are conditional on MSR.VEC for TX=1. Thus, in the end, we can consider all of these as Altivec instructions. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 47 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index bd9259c..5b7d1bd 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -471,11 +471,16 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */ #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */ #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */ +#define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */ +#define LXSD (OPCD(57) | 2) /* v3.00 */ +#define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */ #define STVX XO31(231) #define STVEWX XO31(199) #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */ #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */ +#define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */ +#define STXSD (OPCD(61) | 2) /* v3.00 */ #define VADDSBS VX4(768) #define VADDUBS VX4(512) @@ -1114,7 +1119,7 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, TCGReg base, tcg_target_long offset) { tcg_target_long orig = offset, l0, l1, extra = 0, align = 0; - bool is_store = false; + bool is_int_store = false; TCGReg rs = TCG_REG_TMP1; switch (opi) { @@ -1127,11 +1132,19 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, break; } break; + case LXSD: + case STXSD: + align = 3; + break; + case LXV: + case STXV: + align = 15; + break; case STD: align = 3; /* FALLTHRU */ case STB: case STH: case STW: - is_store = true; + is_int_store = true; break; } @@ -1140,7 +1153,7 @@ static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt, if (rs == base) { rs = TCG_REG_R0; } - tcg_debug_assert(!is_store || rs != rt); + tcg_debug_assert(!is_int_store || rs != rt); tcg_out_movi(s, TCG_TYPE_PTR, rs, orig); tcg_out32(s, opx | TAB(rt & 31, base, rs)); return; @@ -1205,7 +1218,8 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, case TCG_TYPE_V64: tcg_debug_assert(ret >= TCG_REG_V0); if (have_vsx) { - tcg_out_mem_long(s, 0, LXSDX, ret, base, offset); + tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX, + ret, base, offset); break; } tcg_debug_assert((offset & 7) == 0); @@ -1217,7 +1231,8 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, case TCG_TYPE_V128: tcg_debug_assert(ret >= TCG_REG_V0); tcg_debug_assert((offset & 15) == 0); - tcg_out_mem_long(s, 0, LVX, ret, base, offset); + tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0, + LVX, ret, base, offset); break; default: g_assert_not_reached(); @@ -1258,7 +1273,8 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, case TCG_TYPE_V64: tcg_debug_assert(arg >= TCG_REG_V0); if (have_vsx) { - tcg_out_mem_long(s, 0, STXSDX, arg, base, offset); + tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0, + STXSDX, arg, base, offset); break; } tcg_debug_assert((offset & 7) == 0); @@ -1271,7 +1287,8 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, break; case TCG_TYPE_V128: tcg_debug_assert(arg >= TCG_REG_V0); - tcg_out_mem_long(s, 0, STVX, arg, base, offset); + tcg_out_mem_long(s, have_isa_3_00 ?
STXV : 0, + STVX, arg, base, offset); break; default: g_assert_not_reached(); @@ -3042,7 +3059,11 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, tcg_debug_assert(out >= TCG_REG_V0); switch (vece) { case MO_8: - tcg_out_mem_long(s, 0, LVEBX, out, base, offset); + if (have_isa_3_00) { + tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16); + } else { + tcg_out_mem_long(s, 0, LVEBX, out, base, offset); + } elt = extract32(offset, 0, 4); #ifndef HOST_WORDS_BIGENDIAN elt ^= 15; @@ -3051,7 +3072,11 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, break; case MO_16: tcg_debug_assert((offset & 1) == 0); - tcg_out_mem_long(s, 0, LVEHX, out, base, offset); + if (have_isa_3_00) { + tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16); + } else { + tcg_out_mem_long(s, 0, LVEHX, out, base, offset); + } elt = extract32(offset, 1, 3); #ifndef HOST_WORDS_BIGENDIAN elt ^= 7; @@ -3059,6 +3084,10 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16)); break; case MO_32: + if (have_isa_3_00) { + tcg_out_mem_long(s, 0, LXVWSX, out, base, offset); + break; + } tcg_debug_assert((offset & 3) == 0); tcg_out_mem_long(s, 0, LVEWX, out, base, offset); elt = extract32(offset, 2, 2); -- cgit v1.1 From b7ce3cff21e1c944a17ddfc088545d8662a278a7 Mon Sep 17 00:00:00 2001 From: Richard Henderson Date: Mon, 30 Sep 2019 04:44:44 +0000 Subject: tcg/ppc: Update vector support for v3.00 dup/dupi These new instructions are conditional on MSR.VEC for TX=1, so we can consider these Altivec instructions. Tested-by: Mark Cave-Ayland Reviewed-by: Aleksandar Markovic Signed-off-by: Richard Henderson --- tcg/ppc/tcg-target.inc.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c index 5b7d1bd..d308d69 100644 --- a/tcg/ppc/tcg-target.inc.c +++ b/tcg/ppc/tcg-target.inc.c @@ -596,11 +596,14 @@ static int tcg_target_const_match(tcg_target_long val, TCGType type, #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */ #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */ +#define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */ #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */ #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */ #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */ #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */ +#define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */ +#define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */ #define RT(r) ((r)<<21) #define RS(r) ((r)<<21) @@ -931,6 +934,10 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, TCGReg ret, return; } } + if (have_isa_3_00 && val == (tcg_target_long)dup_const(MO_8, val)) { + tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11)); + return; + } /* * Otherwise we must load the value from the constant pool. @@ -3021,7 +3028,22 @@ static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, TCGReg dst, TCGReg src) { tcg_debug_assert(dst >= TCG_REG_V0); - tcg_debug_assert(src >= TCG_REG_V0); + + /* Splat from integer reg allowed via constraints for v3.00. 
*/ if (src < TCG_REG_V0) { tcg_debug_assert(have_isa_3_00); switch (vece) { case MO_64: tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src)); return true; case MO_32: tcg_out32(s, MTVSRWS | VRT(dst) | RA(src)); return true; default: /* Fail, so that we fall back on either dupm or mov+dup. */ return false; } } /* * Recall we use (or emulate) VSX integer loads, so the integer is @@ -3482,6 +3504,7 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) static const TCGTargetOpDef sub2 = { .args_ct_str = { "r", "r", "rI", "rZM", "r", "r" } }; static const TCGTargetOpDef v_r = { .args_ct_str = { "v", "r" } }; + static const TCGTargetOpDef v_vr = { .args_ct_str = { "v", "vr" } }; static const TCGTargetOpDef v_v = { .args_ct_str = { "v", "v" } }; static const TCGTargetOpDef v_v_v = { .args_ct_str = { "v", "v", "v" } }; static const TCGTargetOpDef v_v_v_v @@ -3651,8 +3674,9 @@ static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op) return &v_v_v; case INDEX_op_not_vec: case INDEX_op_neg_vec: - case INDEX_op_dup_vec: return &v_v; + case INDEX_op_dup_vec: + return have_isa_3_00 ? &v_vr : &v_v; case INDEX_op_ld_vec: case INDEX_op_st_vec: case INDEX_op_dupm_vec: -- cgit v1.1 From e8f22f76845e08ec838d5198a8c25d207ba52ff0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20Benn=C3=A9e?= Date: Tue, 1 Oct 2019 17:04:26 +0100 Subject: cpus: kick all vCPUs when running thread=single MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit qemu_cpu_kick is used for a number of reasons including to indicate there is work to be done. However when thread=single the old qemu_cpu_kick_rr_cpu only advanced the vCPU to the next executing one which can lead to a hang in the case that: a) the kick is from outside the vCPUs (e.g. iothread) b) the timers are paused (i.e. iothread calling run_on_cpu) To avoid this, let's split qemu_cpu_kick_rr into two functions. One for the timer which continues to advance to the next timeslice and another for all other kicks.
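In outline: the timer callback keeps nudging only the currently running vCPU so the round-robin schedule advances, while every other kick now exits all vCPUs. A reduced model of the two paths (QEMU's CPU list, locking and atomics replaced with plain C for readability; not the actual implementation):

    typedef struct CPUState {
        struct CPUState *next;
        int exit_request;
    } CPUState;

    static CPUState *first_cpu;            /* head of the vCPU list */
    static CPUState *tcg_current_rr_cpu;   /* set by the RR scheduler */

    static void cpu_exit(CPUState *cpu)
    {
        cpu->exit_request = 1;             /* real code also kicks the thread */
    }

    /* Timer path: only the running vCPU must yield its timeslice. */
    static void qemu_cpu_kick_rr_next_cpu(void)
    {
        if (tcg_current_rr_cpu) {
            cpu_exit(tcg_current_rr_cpu);
        }
    }

    /* All other kicks: any vCPU may own the pending work, so exit them all. */
    static void qemu_cpu_kick_rr_cpus(void)
    {
        for (CPUState *cpu = first_cpu; cpu; cpu = cpu->next) {
            cpu_exit(cpu);
        }
    }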
Message-Id: <20191001160426.26644-1-alex.bennee@linaro.org> Reviewed-by: Paolo Bonzini Reviewed-by: Richard Henderson Signed-off-by: Alex Bennée Signed-off-by: Richard Henderson --- cpus.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/cpus.c b/cpus.c index d2c61ff..bee7209 100644 --- a/cpus.c +++ b/cpus.c @@ -949,8 +949,8 @@ static inline int64_t qemu_tcg_next_kick(void) return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD; } -/* Kick the currently round-robin scheduled vCPU */ -static void qemu_cpu_kick_rr_cpu(void) +/* Kick the currently round-robin scheduled vCPU to next */ +static void qemu_cpu_kick_rr_next_cpu(void) { CPUState *cpu; do { @@ -961,6 +961,16 @@ static void qemu_cpu_kick_rr_cpu(void) } while (cpu != atomic_mb_read(&tcg_current_rr_cpu)); } +/* Kick all RR vCPUs */ +static void qemu_cpu_kick_rr_cpus(void) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + cpu_exit(cpu); + }; +} + static void do_nothing(CPUState *cpu, run_on_cpu_data unused) { } @@ -993,7 +1003,7 @@ void qemu_timer_notify_cb(void *opaque, QEMUClockType type) static void kick_tcg_thread(void *opaque) { timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick()); - qemu_cpu_kick_rr_cpu(); + qemu_cpu_kick_rr_next_cpu(); } static void start_tcg_kick_timer(void) @@ -1828,9 +1838,11 @@ void qemu_cpu_kick(CPUState *cpu) { qemu_cond_broadcast(cpu->halt_cond); if (tcg_enabled()) { - cpu_exit(cpu); - /* NOP unless doing single-thread RR */ - qemu_cpu_kick_rr_cpu(); + if (qemu_tcg_mttcg_enabled()) { + cpu_exit(cpu); + } else { + qemu_cpu_kick_rr_cpus(); + } } else { if (hax_enabled()) { /* -- cgit v1.1