author     Roger Sayle <roger@nextmovesoftware.com>    2021-10-26 08:33:41 +0100
committer  Roger Sayle <roger@nextmovesoftware.com>    2021-10-26 08:33:41 +0100
commit     6b8b25575570ffde37cc8997af096514b929779d (patch)
tree       ee8923431f1ed44c3f58d7cf988326d63b517109 /gcc/config/i386
parent     4e417eea8f3f14131f7370f9bd5dd568611d11df (diff)
x86_64: Implement V1TI mode shifts/rotates by a constant
This patch provides RTL expanders to implement logical shifts and
rotates of 128-bit values (stored in vector integer registers) by
constant bit counts.  Previously, GCC would transfer these values to a
pair of integer registers (TImode) via memory to perform the operation,
then transfer the result back via memory.  Instead, these operations
are now expanded using (between 1 and 5) SSE2 vector instructions.

Logical shifts by multiples of 8 can be implemented using x86_64's
pslldq/psrldq instructions:

ashl_8:
        pslldq  $1, %xmm0
        ret

lshr_32:
        psrldq  $4, %xmm0
        ret

Logical shifts by more than 64 bits can use pslldq/psrldq $8,
followed by a psllq/psrlq for the remaining bits:

ashl_111:
        pslldq  $8, %xmm0
        psllq   $47, %xmm0
        ret

lshr_127:
        psrldq  $8, %xmm0
        psrlq   $63, %xmm0
        ret

The remaining logical shifts make use of the following idiom:

ashl_1:
        movdqa  %xmm0, %xmm1
        psllq   $1, %xmm0
        pslldq  $8, %xmm1
        psrlq   $63, %xmm1
        por     %xmm1, %xmm0
        ret

lshr_15:
        movdqa  %xmm0, %xmm1
        psrlq   $15, %xmm0
        psrldq  $8, %xmm1
        psllq   $49, %xmm1
        por     %xmm1, %xmm0
        ret

Rotates by multiples of 32 can use x86_64's pshufd:

rotr_32:
        pshufd  $57, %xmm0, %xmm0
        ret

rotr_64:
        pshufd  $78, %xmm0, %xmm0
        ret

rotr_96:
        pshufd  $147, %xmm0, %xmm0
        ret

Rotates by multiples of 8 (other than multiples of 32) can make use
of both pslldq and psrldq, followed by por:

rotr_8:
        movdqa  %xmm0, %xmm1
        psrldq  $1, %xmm0
        pslldq  $15, %xmm1
        por     %xmm1, %xmm0
        ret

rotr_112:
        movdqa  %xmm0, %xmm1
        psrldq  $14, %xmm0
        pslldq  $2, %xmm1
        por     %xmm1, %xmm0
        ret

And the remaining rotates use one or two pshufd, followed by a
psrld/pslld/por sequence:

rotr_1:
        movdqa  %xmm0, %xmm1
        pshufd  $57, %xmm0, %xmm0
        psrld   $1, %xmm1
        pslld   $31, %xmm0
        por     %xmm1, %xmm0
        ret

rotr_63:
        pshufd  $78, %xmm0, %xmm1
        pshufd  $57, %xmm0, %xmm0
        pslld   $1, %xmm1
        psrld   $31, %xmm0
        por     %xmm1, %xmm0
        ret

rotr_111:
        pshufd  $147, %xmm0, %xmm1
        pslld   $17, %xmm0
        psrld   $15, %xmm1
        por     %xmm1, %xmm0
        ret

The new test case, sse2-v1ti-shift.c, is a run-time check to confirm
that the results of V1TImode shifts/rotates by constants exactly match
the expected results of TImode operations, for various input test
vectors.

2021-10-26  Roger Sayle  <roger@nextmovesoftware.com>

gcc/ChangeLog
        * config/i386/i386-expand.c (ix86_expand_v1ti_shift): New
        helper function to expand V1TI mode logical shifts by integer
        constants.
        (ix86_expand_v1ti_rotate): New helper function to expand V1TI
        mode rotations by integer constants.
        * config/i386/i386-protos.h (ix86_expand_v1ti_shift,
        ix86_expand_v1ti_rotate): Prototype new functions here.
        * config/i386/sse.md (ashlv1ti3, lshrv1ti3, rotlv1ti3,
        rotrv1ti3): New TARGET_SSE2 expanders to implement V1TI shifts
        and rotations.

gcc/testsuite/ChangeLog
        * gcc.target/i386/sse2-v1ti-shift.c: New test case.
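The function names above (ashl_8, lshr_32, rotr_32, ...) suggest the
source-level shape of these operations.  As a minimal sketch, assuming
a hypothetical uv1ti typedef (the actual test file is not reproduced on
this page), the kind of C code that now expands to the sequences shown
is:

        typedef unsigned __int128 uv1ti __attribute__ ((vector_size (16)));

        uv1ti ashl_8  (uv1ti x) { return x << 8; }                 /* pslldq $1 */
        uv1ti lshr_32 (uv1ti x) { return x >> 32; }                /* psrldq $4 */
        uv1ti rotr_32 (uv1ti x) { return (x >> 32) | (x << 96); }  /* pshufd $57 */

Shifting a one-element __int128 vector keeps the value in an XMM
register, which is what reaches the new V1TImode expanders; the
shift/or pattern in rotr_32 is the usual C idiom for a rotate.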
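To make the generic shift idiom concrete, here is a minimal C sketch
using SSE2 intrinsics from <emmintrin.h>.  This is purely illustrative:
the patch emits the equivalent instructions directly from RTL
expanders, not via intrinsics, and v1ti_ashl is a hypothetical name.
It implements a left shift by bits, for 0 < bits < 64:

        #include <emmintrin.h>

        /* Hypothetical illustration of the ashl_1/lshr_15 idiom above:
           shift a 128-bit value held in an XMM register left by BITS,
           where 0 < BITS < 64.  */
        static __m128i
        v1ti_ashl (__m128i x, int bits)
        {
          __m128i lo = _mm_slli_epi64 (x, bits);   /* psllq: shift both 64-bit halves */
          __m128i hi = _mm_slli_si128 (x, 8);      /* pslldq $8: move low half to high */
          hi = _mm_srli_epi64 (hi, 64 - bits);     /* psrlq: bits that cross the halves */
          return _mm_or_si128 (lo, hi);            /* por: combine the two parts */
        }

The psllq shifts both 64-bit halves at once but drops the bits that
should carry from the low half into the high half; the pslldq/psrlq
pair recovers exactly those carried bits, and por merges them back in.
This is the same five-instruction sequence as ashl_1 above (with
bits = 1 and 64 - bits = 63).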
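The pshufd-based rotate idiom admits a similar sketch (again
hypothetical, using the same <emmintrin.h> intrinsics; valid for a
right rotation by bits with 0 < bits < 32):

        /* Hypothetical illustration of the rotr_1 idiom above: rotate a
           128-bit value in an XMM register right by BITS, 0 < BITS < 32.  */
        static __m128i
        v1ti_rotr (__m128i x, int bits)
        {
          __m128i lo = _mm_srli_epi32 (x, bits);    /* psrld: shift each 32-bit word */
          __m128i hi = _mm_shuffle_epi32 (x, 0x39); /* pshufd $57: word i <- word i+1 */
          hi = _mm_slli_epi32 (hi, 32 - bits);      /* pslld: bits crossing word edges */
          return _mm_or_si128 (lo, hi);             /* por: combine */
        }

Each 32-bit word of the result takes its low bits from the same word
of the input and its high bits from the next word up; pshufd $57
rotates the words into position so that a single pslld supplies all
four crossing parts at once.  This matches rotr_1 above (bits = 1,
32 - bits = 31).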
Diffstat (limited to 'gcc/config/i386')
-rw-r--r--  gcc/config/i386/i386-expand.c | 163
-rw-r--r--  gcc/config/i386/i386-protos.h |   2
-rw-r--r--  gcc/config/i386/sse.md        |  44
3 files changed, 209 insertions, 0 deletions
diff --git a/gcc/config/i386/i386-expand.c b/gcc/config/i386/i386-expand.c
index 56dd99b..4c3800e 100644
--- a/gcc/config/i386/i386-expand.c
+++ b/gcc/config/i386/i386-expand.c
@@ -6157,6 +6157,169 @@ ix86_split_lshr (rtx *operands, rtx scratch, machine_mode mode)
}
}
+/* Expand V1TI mode shift (of rtx_code CODE) by constant. */
+void ix86_expand_v1ti_shift (enum rtx_code code, rtx operands[])
+{
+ HOST_WIDE_INT bits = INTVAL (operands[2]) & 127;
+ rtx op1 = force_reg (V1TImode, operands[1]);
+
+ if (bits == 0)
+ {
+ emit_move_insn (operands[0], op1);
+ return;
+ }
+
+ if ((bits & 7) == 0)
+ {
+ rtx tmp = gen_reg_rtx (V1TImode);
+ if (code == ASHIFT)
+ emit_insn (gen_sse2_ashlv1ti3 (tmp, op1, GEN_INT (bits)));
+ else
+ emit_insn (gen_sse2_lshrv1ti3 (tmp, op1, GEN_INT (bits)));
+ emit_move_insn (operands[0], tmp);
+ return;
+ }
+
+ rtx tmp1 = gen_reg_rtx (V1TImode);
+ if (code == ASHIFT)
+ emit_insn (gen_sse2_ashlv1ti3 (tmp1, op1, GEN_INT (64)));
+ else
+ emit_insn (gen_sse2_lshrv1ti3 (tmp1, op1, GEN_INT (64)));
+
+ /* tmp2 is operands[1] shifted by 64, in V2DImode. */
+ rtx tmp2 = gen_reg_rtx (V2DImode);
+ emit_move_insn (tmp2, gen_lowpart (V2DImode, tmp1));
+
+ /* tmp3 will be the V2DImode result. */
+ rtx tmp3 = gen_reg_rtx (V2DImode);
+
+ if (bits > 64)
+ {
+ if (code == ASHIFT)
+ emit_insn (gen_ashlv2di3 (tmp3, tmp2, GEN_INT (bits - 64)));
+ else
+ emit_insn (gen_lshrv2di3 (tmp3, tmp2, GEN_INT (bits - 64)));
+ }
+ else
+ {
+ /* tmp4 is operands[1], in V2DImode. */
+ rtx tmp4 = gen_reg_rtx (V2DImode);
+ emit_move_insn (tmp4, gen_lowpart (V2DImode, op1));
+
+ rtx tmp5 = gen_reg_rtx (V2DImode);
+ if (code == ASHIFT)
+ emit_insn (gen_ashlv2di3 (tmp5, tmp4, GEN_INT (bits)));
+ else
+ emit_insn (gen_lshrv2di3 (tmp5, tmp4, GEN_INT (bits)));
+
+ rtx tmp6 = gen_reg_rtx (V2DImode);
+ if (code == ASHIFT)
+ emit_insn (gen_lshrv2di3 (tmp6, tmp2, GEN_INT (64 - bits)));
+ else
+ emit_insn (gen_ashlv2di3 (tmp6, tmp2, GEN_INT (64 - bits)));
+
+ emit_insn (gen_iorv2di3 (tmp3, tmp5, tmp6));
+ }
+
+ /* Convert the result back to V1TImode and store in operands[0]. */
+ rtx tmp7 = gen_reg_rtx (V1TImode);
+ emit_move_insn (tmp7, gen_lowpart (V1TImode, tmp3));
+ emit_move_insn (operands[0], tmp7);
+}
+
+/* Expand V1TI mode rotate (of rtx_code CODE) by constant. */
+void ix86_expand_v1ti_rotate (enum rtx_code code, rtx operands[])
+{
+ HOST_WIDE_INT bits = INTVAL (operands[2]) & 127;
+ rtx op1 = force_reg (V1TImode, operands[1]);
+
+ if (bits == 0)
+ {
+ emit_move_insn (operands[0], op1);
+ return;
+ }
+
+ if (code == ROTATERT)
+ bits = 128 - bits;
+
+ if ((bits & 31) == 0)
+ {
+ rtx tmp1 = gen_reg_rtx (V4SImode);
+ rtx tmp2 = gen_reg_rtx (V4SImode);
+ rtx tmp3 = gen_reg_rtx (V1TImode);
+
+ emit_move_insn (tmp1, gen_lowpart (V4SImode, op1));
+ if (bits == 32)
+ emit_insn (gen_sse2_pshufd (tmp2, tmp1, GEN_INT (0x93)));
+ else if (bits == 64)
+ emit_insn (gen_sse2_pshufd (tmp2, tmp1, GEN_INT (0x4e)));
+ else
+ emit_insn (gen_sse2_pshufd (tmp2, tmp1, GEN_INT (0x39)));
+ emit_move_insn (tmp3, gen_lowpart (V1TImode, tmp2));
+ emit_move_insn (operands[0], tmp3);
+ return;
+ }
+
+ if ((bits & 7) == 0)
+ {
+ rtx tmp1 = gen_reg_rtx (V1TImode);
+ rtx tmp2 = gen_reg_rtx (V1TImode);
+ rtx tmp3 = gen_reg_rtx (V1TImode);
+
+ emit_insn (gen_sse2_ashlv1ti3 (tmp1, op1, GEN_INT (bits)));
+ emit_insn (gen_sse2_lshrv1ti3 (tmp2, op1, GEN_INT (128 - bits)));
+ emit_insn (gen_iorv1ti3 (tmp3, tmp1, tmp2));
+ emit_move_insn (operands[0], tmp3);
+ return;
+ }
+
+ rtx op1_v4si = gen_reg_rtx (V4SImode);
+ emit_move_insn (op1_v4si, gen_lowpart (V4SImode, op1));
+
+ rtx lobits;
+ rtx hibits;
+
+ switch (bits >> 5)
+ {
+ case 0:
+ lobits = op1_v4si;
+ hibits = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_pshufd (hibits, op1_v4si, GEN_INT (0x93)));
+ break;
+
+ case 1:
+ lobits = gen_reg_rtx (V4SImode);
+ hibits = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_pshufd (lobits, op1_v4si, GEN_INT (0x93)));
+ emit_insn (gen_sse2_pshufd (hibits, op1_v4si, GEN_INT (0x4e)));
+ break;
+
+ case 2:
+ lobits = gen_reg_rtx (V4SImode);
+ hibits = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_pshufd (lobits, op1_v4si, GEN_INT (0x4e)));
+ emit_insn (gen_sse2_pshufd (hibits, op1_v4si, GEN_INT (0x39)));
+ break;
+
+ default:
+ lobits = gen_reg_rtx (V4SImode);
+ emit_insn (gen_sse2_pshufd (lobits, op1_v4si, GEN_INT (0x39)));
+ hibits = op1_v4si;
+ break;
+ }
+
+ rtx tmp1 = gen_reg_rtx (V4SImode);
+ rtx tmp2 = gen_reg_rtx (V4SImode);
+ rtx tmp3 = gen_reg_rtx (V4SImode);
+ rtx tmp4 = gen_reg_rtx (V1TImode);
+
+ emit_insn (gen_ashlv4si3 (tmp1, lobits, GEN_INT (bits & 31)));
+ emit_insn (gen_lshrv4si3 (tmp2, hibits, GEN_INT (32 - (bits & 31))));
+ emit_insn (gen_iorv4si3 (tmp3, tmp1, tmp2));
+ emit_move_insn (tmp4, gen_lowpart (V1TImode, tmp3));
+ emit_move_insn (operands[0], tmp4);
+}
+
/* Return mode for the memcpy/memset loop counter. Prefer SImode over
DImode for constant loop counts. */
diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h
index 708834a..9918a28 100644
--- a/gcc/config/i386/i386-protos.h
+++ b/gcc/config/i386/i386-protos.h
@@ -159,6 +159,8 @@ extern void ix86_split_long_move (rtx[]);
extern void ix86_split_ashl (rtx *, rtx, machine_mode);
extern void ix86_split_ashr (rtx *, rtx, machine_mode);
extern void ix86_split_lshr (rtx *, rtx, machine_mode);
+extern void ix86_expand_v1ti_shift (enum rtx_code, rtx[]);
+extern void ix86_expand_v1ti_rotate (enum rtx_code, rtx[]);
extern rtx ix86_find_base_term (rtx);
extern bool ix86_check_movabs (rtx, int);
extern bool ix86_check_no_addr_space (rtx);
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 431236a..bdc6067 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -15075,6 +15075,50 @@
operands[4] = gen_lowpart (<MODE>mode, operands[3]);
})
+(define_expand "ashlv1ti3"
+ [(set (match_operand:V1TI 0 "register_operand")
+ (ashift:V1TI
+ (match_operand:V1TI 1 "register_operand")
+ (match_operand:SI 2 "const_int_operand")))]
+ "TARGET_SSE2"
+{
+ ix86_expand_v1ti_shift (ASHIFT, operands);
+ DONE;
+})
+
+(define_expand "lshrv1ti3"
+ [(set (match_operand:V1TI 0 "register_operand")
+ (lshiftrt:V1TI
+ (match_operand:V1TI 1 "register_operand")
+ (match_operand:SI 2 "const_int_operand")))]
+ "TARGET_SSE2"
+{
+ ix86_expand_v1ti_shift (LSHIFTRT, operands);
+ DONE;
+})
+
+(define_expand "rotlv1ti3"
+ [(set (match_operand:V1TI 0 "register_operand")
+ (rotate:V1TI
+ (match_operand:V1TI 1 "register_operand")
+ (match_operand:SI 2 "const_int_operand")))]
+ "TARGET_SSE2"
+{
+ ix86_expand_v1ti_rotate (ROTATE, operands);
+ DONE;
+})
+
+(define_expand "rotrv1ti3"
+ [(set (match_operand:V1TI 0 "register_operand")
+ (rotatert:V1TI
+ (match_operand:V1TI 1 "register_operand")
+ (match_operand:SI 2 "const_int_operand")))]
+ "TARGET_SSE2"
+{
+ ix86_expand_v1ti_rotate (ROTATERT, operands);
+ DONE;
+})
+
(define_insn "avx512bw_<insn><mode>3"
[(set (match_operand:VIMAX_AVX512VL 0 "register_operand" "=v")
(any_lshift:VIMAX_AVX512VL