author     Robin Dapp <rdapp@ventanamicro.com>  2024-09-16 22:22:14 +0200
committer  Robin Dapp <rdapp@ventanamicro.com>  2024-12-13 10:12:16 +0100
commit     71bfc8c33e63f4a566079d34ed3bc98f45133e96 (patch)
tree       9bd70ddad15c121e154e1a517f536078b90bba3e /gcc
parent     cfdab86f20f6e77d9c8bf982989f78ef975c7611 (diff)
RISC-V: Add slide to perm_const strategies.
This patch adds shuffle_slide_patterns to expand_vec_perm_const.  It
recognizes permutations like {0, 1, 4, 5} or {2, 3, 6, 7} which can be
constructed by a slideup or slidedown of one of the vectors into the
other one.

gcc/ChangeLog:

	* config/riscv/riscv-v.cc (shuffle_slide_patterns): New.
	(expand_vec_perm_const_1): Call new function.

gcc/testsuite/ChangeLog:

	* gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide-run.c: New test.
	* gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide.c: New test.
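As an editorial illustration (not part of the commit itself), here is the
kind of source-level shuffle the new strategy targets, written with the same
__builtin_shufflevector builtin the new tests use; the type and function
name are made up for the example:

    typedef int v4si __attribute__ ((vector_size (16)));

    /* {0, 1, 4, 5} selects the low halves of A and B; with this patch
       it can be expanded as a single vslideup of B into A instead of a
       two-source vrgather sequence.  */
    v4si
    combine_lows (v4si a, v4si b)
    {
      return __builtin_shufflevector (a, b, 0, 1, 4, 5);
    }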
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/riscv/riscv-v.cc                                              |  99 +
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide-run.c | 266 +
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide.c     | 207 +
3 files changed, 572 insertions(+), 0 deletions(-)
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 2530fd9..42c4e7d 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -3397,6 +3397,103 @@ shuffle_compress_patterns (struct expand_vec_perm_d *d)
return true;
}
+/* Recognize patterns like [4 5 6 7 12 13 14 15] where either the lower
+ or the higher parts of both vectors are combined into one. */
+
+static bool
+shuffle_slide_patterns (struct expand_vec_perm_d *d)
+{
+ machine_mode vmode = d->vmode;
+ poly_int64 vec_len = d->perm.length ();
+
+ if (!vec_len.is_constant ())
+ return false;
+
+ int vlen = vec_len.to_constant ();
+ if (vlen < 4)
+ return false;
+
+ if (d->one_vector_p)
+ return false;
+
+ /* For a slideup OP0 can stay, for a slidedown OP1 can.
+ The former requires that the first element of the permutation
+ is the first element of OP0, the latter that the last permutation
+ element is the last element of OP1. */
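+ /* For example, with VLEN = 4: {0, 1, 4, 5} starts with OP0's first
+ element and is a slideup candidate, while {2, 3, 6, 7} ends with
+ OP1's last element (7) and is a slidedown candidate. */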
+ bool slideup = false;
+ bool slidedown = false;
+
+ /* For a slideup the permutation must start at OP0's first element. */
+ if (known_eq (d->perm[0], 0))
+ slideup = true;
+
+ /* For a slidedown the permutation must end at OP1's last element. */
+ if (known_eq (d->perm[vlen - 1], 2 * vlen - 1))
+ slidedown = true;
+
+ if (slideup && slidedown)
+ return false;
+
+ if (!slideup && !slidedown)
+ return false;
+
+ /* Check for a monotonic sequence with one pivot. */
+ int pivot = -1;
+ for (int i = 0; i < vlen; i++)
+ {
+ if (pivot == -1 && known_ge (d->perm[i], vec_len))
+ pivot = i;
+ if (i > 0 && i != pivot
+ && maybe_ne (d->perm[i], d->perm[i - 1] + 1))
+ return false;
+ }
+
+ if (pivot == -1)
+ return false;
+
+ /* For a slideup OP1's part (to be slid up) must be a low part,
+ i.e. starting with its first element. */
+ if (slideup && maybe_ne (d->perm[pivot], vlen))
+ return false;
+
+ /* For a slidedown OP0's part (to be slid down) must be a high part,
+ i.e. ending with its last element. */
+ if (slidedown && maybe_ne (d->perm[pivot - 1], vlen - 1))
+ return false;
+
+ /* Success! */
+ if (d->testing_p)
+ return true;
+
+ /* PIVOT is the index at which OP1's part of the permutation
+ starts, i.e. the start of the lower/higher part taken from OP1.
+ For a slideup it is the amount by which OP1 is slid up (the
+ number of OP0's leading elements that are kept). For a
+ slidedown it is the length of OP0's high part that is slid
+ down, while VLEN - PIVOT is the amount to slide. */
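+ /* E.g. with VLEN = 4: {0, 1, 4, 5} has PIVOT = 2 and slides OP1
+ up by 2; {2, 3, 6, 7} has PIVOT = 2 and slides OP0 down by
+ 4 - 2 = 2 with an effective vector length of 2. */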
+ int slide_cnt = slideup ? pivot : vlen - pivot;
+ insn_code icode;
+ if (slideup)
+ {
+ /* No need for a vector length because we slide OP1 up to the
+ end of the vector anyway. */
+ rtx ops[] = {d->target, d->op0, d->op1, gen_int_mode (slide_cnt, Pmode)};
+ icode = code_for_pred_slide (UNSPEC_VSLIDEUP, vmode);
+ emit_vlmax_insn (icode, SLIDEUP_OP_MERGE, ops);
+ }
+ else
+ {
+ /* Here we need a vector length because we slide OP0's high part
+ down to the beginning of the result, leaving OP1's trailing
+ elements undisturbed. */
+ int len = pivot;
+ rtx ops[] = {d->target, d->op1, d->op0,
+ gen_int_mode (slide_cnt, Pmode)};
+ icode = code_for_pred_slide (UNSPEC_VSLIDEDOWN, vmode);
+ emit_nonvlmax_insn (icode, BINARY_OP_TUMA, ops,
+ gen_int_mode (len, Pmode));
+ }
+
+ return true;
+}
+
/* Recognize decompress patterns:
1. VEC_PERM_EXPR op0 and op1
@@ -3711,6 +3808,8 @@ expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
return true;
if (shuffle_consecutive_patterns (d))
return true;
+ if (shuffle_slide_patterns (d))
+ return true;
if (shuffle_compress_patterns (d))
return true;
if (shuffle_decompress_patterns (d))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide-run.c
new file mode 100644
index 0000000..50cba3f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide-run.c
@@ -0,0 +1,266 @@
+/* { dg-do run } */
+/* { dg-require-effective-target riscv_v_ok } */
+/* { dg-add-options riscv_v } */
+/* { dg-additional-options "-O3 -std=gnu99 -mrvv-max-lmul=m8 -Wno-overflow" } */
+
+#include "shuffle-slide.c"
+
+#define comp(a, b, n) \
+ for (unsigned i = 0; i < n; ++i) \
+ if ((a)[i] != (b)[i]) \
+ __builtin_abort ();
+
+#define CHECK1(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void check1_##TYPE () \
+ { \
+ TYPE v10_##TYPE = (TYPE){SERIES_##NUNITS (0, NUNITS)}; \
+ TYPE v11_##TYPE = (TYPE){SERIES_##NUNITS (NUNITS, NUNITS)}; \
+ TYPE ref1_##TYPE = (TYPE){MASK1_##NUNITS (0, NUNITS)}; \
+ TYPE res1_##TYPE; \
+ permute1_##TYPE (v10_##TYPE, v11_##TYPE, &res1_##TYPE); \
+ comp (res1_##TYPE, ref1_##TYPE, NUNITS); \
+ }
+
+#define CHECK2(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void check2_##TYPE () \
+ { \
+ TYPE v20_##TYPE = (TYPE){SERIES_##NUNITS (0, NUNITS)}; \
+ TYPE v21_##TYPE = (TYPE){SERIES_##NUNITS (NUNITS, NUNITS)}; \
+ TYPE ref2_##TYPE = (TYPE){MASK1D_##NUNITS (0, NUNITS)}; \
+ TYPE res2_##TYPE; \
+ permute2_##TYPE (v20_##TYPE, v21_##TYPE, &res2_##TYPE); \
+ comp (res2_##TYPE, ref2_##TYPE, NUNITS); \
+ }
+
+#define CHECK3(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void check3_##TYPE () \
+ { \
+ TYPE v30_##TYPE = (TYPE){SERIES_##NUNITS (0, NUNITS)}; \
+ TYPE v31_##TYPE = (TYPE){SERIES_##NUNITS (NUNITS, NUNITS)}; \
+ TYPE ref3_##TYPE = (TYPE){MASK2U_##NUNITS (0, NUNITS)}; \
+ TYPE res3_##TYPE; \
+ permute3_##TYPE (v30_##TYPE, v31_##TYPE, &res3_##TYPE); \
+ comp (res3_##TYPE, ref3_##TYPE, NUNITS); \
+ }
+
+#define CHECK4(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void check4_##TYPE () \
+ { \
+ TYPE v40_##TYPE = (TYPE){SERIES_##NUNITS (0, NUNITS)}; \
+ TYPE v41_##TYPE = (TYPE){SERIES_##NUNITS (NUNITS, NUNITS)}; \
+ TYPE ref4_##TYPE = (TYPE){MASK3U_##NUNITS (0, NUNITS)}; \
+ TYPE res4_##TYPE; \
+ permute4_##TYPE (v40_##TYPE, v41_##TYPE, &res4_##TYPE); \
+ comp (res4_##TYPE, ref4_##TYPE, NUNITS); \
+ }
+
+#define CHECK5(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void check5_##TYPE () \
+ { \
+ TYPE v50_##TYPE = (TYPE){SERIES_##NUNITS (0, NUNITS)}; \
+ TYPE v51_##TYPE = (TYPE){SERIES_##NUNITS (NUNITS, NUNITS)}; \
+ TYPE ref5_##TYPE = (TYPE){MASK2D_##NUNITS (0, NUNITS)}; \
+ TYPE res5_##TYPE; \
+ permute5_##TYPE (v50_##TYPE, v51_##TYPE, &res5_##TYPE); \
+ comp (res5_##TYPE, ref5_##TYPE, NUNITS); \
+ }
+
+#define CHECK6(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void check6_##TYPE () \
+ { \
+ TYPE v60_##TYPE = (TYPE){SERIES_##NUNITS (0, NUNITS)}; \
+ TYPE v61_##TYPE = (TYPE){SERIES_##NUNITS (NUNITS, NUNITS)}; \
+ TYPE ref6_##TYPE = (TYPE){MASK3D_##NUNITS (0, NUNITS)}; \
+ TYPE res6_##TYPE; \
+ permute6_##TYPE (v60_##TYPE, v61_##TYPE, &res6_##TYPE); \
+ comp (res6_##TYPE, ref6_##TYPE, NUNITS); \
+ }
+
+#define CHECK_ALL(T) \
+ T (vnx4qi, 4) \
+ T (vnx8qi, 8) \
+ T (vnx16qi, 16) \
+ T (vnx32qi, 32) \
+ T (vnx64qi, 64) \
+ T (vnx128qi, 128) \
+ T (vnx4hi, 4) \
+ T (vnx8hi, 8) \
+ T (vnx16hi, 16) \
+ T (vnx32hi, 32) \
+ T (vnx64hi, 64) \
+ T (vnx4si, 4) \
+ T (vnx8si, 8) \
+ T (vnx16si, 16) \
+ T (vnx32si, 32) \
+ T (vnx4di, 4) \
+ T (vnx8di, 8) \
+ T (vnx16di, 16) \
+ T (vnx4sf, 4) \
+ T (vnx8sf, 8) \
+ T (vnx16sf, 16) \
+ T (vnx32sf, 32) \
+ T (vnx4df, 4) \
+ T (vnx8df, 8) \
+ T (vnx16df, 16)
+
+CHECK_ALL (CHECK1)
+CHECK_ALL (CHECK2)
+CHECK_ALL (CHECK3)
+CHECK_ALL (CHECK4)
+CHECK_ALL (CHECK5)
+CHECK_ALL (CHECK6)
+
+int
+main ()
+{
+ check1_vnx4qi ();
+ check1_vnx8qi ();
+ check1_vnx16qi ();
+ check1_vnx32qi ();
+ check1_vnx64qi ();
+ check1_vnx128qi ();
+ check1_vnx4hi ();
+ check1_vnx8hi ();
+ check1_vnx16hi ();
+ check1_vnx32hi ();
+ check1_vnx64hi ();
+ check1_vnx4si ();
+ check1_vnx8si ();
+ check1_vnx16si ();
+ check1_vnx32si ();
+ check1_vnx4di ();
+ check1_vnx8di ();
+ check1_vnx16di ();
+ check1_vnx4sf ();
+ check1_vnx8sf ();
+ check1_vnx16sf ();
+ check1_vnx32sf ();
+ check1_vnx4df ();
+ check1_vnx8df ();
+ check1_vnx16df ();
+ check2_vnx4qi ();
+ check2_vnx8qi ();
+ check2_vnx16qi ();
+ check2_vnx32qi ();
+ check2_vnx64qi ();
+ check2_vnx128qi ();
+ check2_vnx4hi ();
+ check2_vnx8hi ();
+ check2_vnx16hi ();
+ check2_vnx32hi ();
+ check2_vnx64hi ();
+ check2_vnx4si ();
+ check2_vnx8si ();
+ check2_vnx16si ();
+ check2_vnx32si ();
+ check2_vnx4di ();
+ check2_vnx8di ();
+ check2_vnx16di ();
+ check2_vnx4sf ();
+ check2_vnx8sf ();
+ check2_vnx16sf ();
+ check2_vnx32sf ();
+ check2_vnx4df ();
+ check2_vnx8df ();
+ check2_vnx16df ();
+ check3_vnx4qi ();
+ check3_vnx8qi ();
+ check3_vnx16qi ();
+ check3_vnx32qi ();
+ check3_vnx64qi ();
+ check3_vnx128qi ();
+ check3_vnx4hi ();
+ check3_vnx8hi ();
+ check3_vnx16hi ();
+ check3_vnx32hi ();
+ check3_vnx64hi ();
+ check3_vnx4si ();
+ check3_vnx8si ();
+ check3_vnx16si ();
+ check3_vnx32si ();
+ check3_vnx4di ();
+ check3_vnx8di ();
+ check3_vnx16di ();
+ check3_vnx4sf ();
+ check3_vnx8sf ();
+ check3_vnx16sf ();
+ check3_vnx32sf ();
+ check3_vnx4df ();
+ check3_vnx8df ();
+ check3_vnx16df ();
+ check4_vnx4qi ();
+ check4_vnx8qi ();
+ check4_vnx16qi ();
+ check4_vnx32qi ();
+ check4_vnx64qi ();
+ check4_vnx128qi ();
+ check4_vnx4hi ();
+ check4_vnx8hi ();
+ check4_vnx16hi ();
+ check4_vnx32hi ();
+ check4_vnx64hi ();
+ check4_vnx4si ();
+ check4_vnx8si ();
+ check4_vnx16si ();
+ check4_vnx32si ();
+ check4_vnx4di ();
+ check4_vnx8di ();
+ check4_vnx16di ();
+ check4_vnx4sf ();
+ check4_vnx8sf ();
+ check4_vnx16sf ();
+ check4_vnx32sf ();
+ check4_vnx4df ();
+ check4_vnx8df ();
+ check4_vnx16df ();
+ check5_vnx4qi ();
+ check5_vnx8qi ();
+ check5_vnx16qi ();
+ check5_vnx32qi ();
+ check5_vnx64qi ();
+ check5_vnx128qi ();
+ check5_vnx4hi ();
+ check5_vnx8hi ();
+ check5_vnx16hi ();
+ check5_vnx32hi ();
+ check5_vnx64hi ();
+ check5_vnx4si ();
+ check5_vnx8si ();
+ check5_vnx16si ();
+ check5_vnx32si ();
+ check5_vnx4di ();
+ check5_vnx8di ();
+ check5_vnx16di ();
+ check5_vnx4sf ();
+ check5_vnx8sf ();
+ check5_vnx16sf ();
+ check5_vnx32sf ();
+ check5_vnx4df ();
+ check5_vnx8df ();
+ check5_vnx16df ();
+ check6_vnx4qi ();
+ check6_vnx8qi ();
+ check6_vnx16qi ();
+ check6_vnx32qi ();
+ check6_vnx64qi ();
+ check6_vnx128qi ();
+ check6_vnx4hi ();
+ check6_vnx8hi ();
+ check6_vnx16hi ();
+ check6_vnx32hi ();
+ check6_vnx64hi ();
+ check6_vnx4si ();
+ check6_vnx8si ();
+ check6_vnx16si ();
+ check6_vnx32si ();
+ check6_vnx4di ();
+ check6_vnx8di ();
+ check6_vnx16di ();
+ check6_vnx4sf ();
+ check6_vnx8sf ();
+ check6_vnx16sf ();
+ check6_vnx32sf ();
+ check6_vnx4df ();
+ check6_vnx8df ();
+ check6_vnx16df ();
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide.c
new file mode 100644
index 0000000..4f40094
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-slide.c
@@ -0,0 +1,207 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -march=rv64gcv -mrvv-max-lmul=m8 -Wno-overflow" } */
+
+#include "perm.h"
+
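+/* Overview of the mask families defined below (editorial summary):
+   MASK1:  the low halves of both vectors, e.g. {0, 1, 4, 5}
+           (slideup by NUNITS / 2).
+   MASK1D: the high halves of both vectors, e.g. {2, 3, 6, 7}
+           (slidedown by NUNITS / 2).
+   MASK2U: all but OP0's last element, then OP1's first element
+           (slideup by NUNITS - 1).
+   MASK3U: OP0's first element, then all but OP1's last element
+           (slideup by 1).
+   MASK2D: all but OP0's first element, then OP1's last element
+           (slidedown by 1).
+   MASK3D: OP0's last element, then all but OP1's first element
+           (slidedown by NUNITS - 1).  */
+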
+#define SERIES_2(x, y) (x), (x + 1)
+#define SERIES_4(x, y) SERIES_2 (x, y), SERIES_2 (x + 2, y)
+#define SERIES_8(x, y) SERIES_4 (x, y), SERIES_4 (x + 4, y)
+#define SERIES_16(x, y) SERIES_8 (x, y), SERIES_8 (x + 8, y)
+#define SERIES_32(x, y) SERIES_16 (x, y), SERIES_16 (x + 16, y)
+#define SERIES_64(x, y) SERIES_32 (x, y), SERIES_32 (x + 32, y)
+#define SERIES_128(x, y) SERIES_64 (x, y), SERIES_64 (x + 64, y)
+
+#define MASK1_4(X, Y) SERIES_2 (X, Y), SERIES_2 (X + 4, Y)
+#define MASK1_8(X, Y) SERIES_4 (X, Y), SERIES_4 (X + 8, Y)
+#define MASK1_16(X, Y) SERIES_8 (X, Y), SERIES_8 (X + 16, Y)
+#define MASK1_32(X, Y) SERIES_16 (X, Y), SERIES_16 (X + 32, Y)
+#define MASK1_64(X, Y) SERIES_32 (X, Y), SERIES_32 (X + 64, Y)
+#define MASK1_128(X, Y) SERIES_64 (X, Y), SERIES_64 (X + 128, Y)
+
+#define MASK1D_4(X, Y) SERIES_2 (X + 2, Y), SERIES_2 (X + 6, Y)
+#define MASK1D_8(X, Y) SERIES_4 (X + 4, Y), SERIES_4 (X + 12, Y)
+#define MASK1D_16(X, Y) SERIES_8 (X + 8, Y), SERIES_8 (X + 24, Y)
+#define MASK1D_32(X, Y) SERIES_16 (X + 16, Y), SERIES_16 (X + 48, Y)
+#define MASK1D_64(X, Y) SERIES_32 (X + 32, Y), SERIES_32 (X + 96, Y)
+#define MASK1D_128(X, Y) SERIES_64 (X + 64, Y), SERIES_64 (X + 192, Y)
+
+#define MASK2U_4(X, Y) 0, 1, 2, 4
+#define MASK2U_8(X, Y) 0, 1, 2, 3, 4, 5, 6, 8
+#define MASK2U_16(X, Y) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16
+#define MASK2U_32(X, Y) \
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, \
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 32
+#define MASK2U_64(X, Y) \
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, \
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, \
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, \
+ 57, 58, 59, 60, 61, 62, 64
+#define MASK2U_128(X, Y) \
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, \
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, \
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, \
+ 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, \
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, \
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, \
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, \
+ 124, 125, 126, 128
+
+#define MASK3U_4(X, Y) 0, 4, 5, 6
+#define MASK3U_8(X, Y) 0, 8, 9, 10, 11, 12, 13, 14
+#define MASK3U_16(X, Y) \
+ 0, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30
+#define MASK3U_32(X, Y) \
+ 0, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, \
+ 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62
+#define MASK3U_64(X, Y) \
+ 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, \
+ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, \
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, \
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126
+#define MASK3U_128(X, Y) \
+ 0, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, \
+ 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, \
+ 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, \
+ 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, \
+ 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, \
+ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, \
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, \
+ 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, \
+ 247, 248, 249, 250, 251, 252, 253, 254
+
+#define MASK2D_4(X, Y) 1, 2, 3, 7
+#define MASK2D_8(X, Y) 1, 2, 3, 4, 5, 6, 7, 15
+#define MASK2D_16(X, Y) 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
+#define MASK2D_32(X, Y) \
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, \
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 63
+#define MASK2D_64(X, Y) \
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, \
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, \
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, \
+ 58, 59, 60, 61, 62, 63, 127
+#define MASK2D_128(X, Y) \
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, \
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, \
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, \
+ 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, \
+ 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, \
+ 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, \
+ 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, \
+ 125, 126, 127, 255
+
+#define MASK3D_4(X, Y) 3, 5, 6, 7
+#define MASK3D_8(X, Y) 7, 9, 10, 11, 12, 13, 14, 15
+#define MASK3D_16(X, Y) \
+ 15, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+#define MASK3D_32(X, Y) \
+ 31, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, \
+ 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63
+#define MASK3D_64(X, Y) \
+ 63, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, \
+ 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, \
+ 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, \
+ 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127
+#define MASK3D_128(X, Y) \
+ 127, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, \
+ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, \
+ 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, \
+ 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, \
+ 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, \
+ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, \
+ 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, \
+ 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, \
+ 248, 249, 250, 251, 252, 253, 254, 255
+
+#define PERMUTE1(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void permute1_##TYPE (TYPE values1, TYPE values2, \
+ TYPE *out) \
+ { \
+ TYPE v = __builtin_shufflevector (values1, values2, \
+ MASK1_##NUNITS (0, NUNITS)); \
+ *(TYPE *) out = v; \
+ }
+
+#define PERMUTE2(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void permute2_##TYPE (TYPE values1, TYPE values2, \
+ TYPE *out) \
+ { \
+ TYPE v = __builtin_shufflevector (values1, values2, \
+ MASK1D_##NUNITS (0, NUNITS)); \
+ *(TYPE *) out = v; \
+ }
+
+#define PERMUTE3(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void permute3_##TYPE (TYPE values1, TYPE values2, \
+ TYPE *out) \
+ { \
+ TYPE v = __builtin_shufflevector (values1, values2, \
+ MASK2U_##NUNITS (0, NUNITS)); \
+ *(TYPE *) out = v; \
+ }
+
+#define PERMUTE4(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void permute4_##TYPE (TYPE values1, TYPE values2, \
+ TYPE *out) \
+ { \
+ TYPE v = __builtin_shufflevector (values1, values2, \
+ MASK3U_##NUNITS (0, NUNITS)); \
+ *(TYPE *) out = v; \
+ }
+
+#define PERMUTE5(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void permute5_##TYPE (TYPE values1, TYPE values2, \
+ TYPE *out) \
+ { \
+ TYPE v = __builtin_shufflevector (values1, values2, \
+ MASK2D_##NUNITS (0, NUNITS)); \
+ *(TYPE *) out = v; \
+ }
+
+#define PERMUTE6(TYPE, NUNITS) \
+ __attribute__ ((noipa)) void permute6_##TYPE (TYPE values1, TYPE values2, \
+ TYPE *out) \
+ { \
+ TYPE v = __builtin_shufflevector (values1, values2, \
+ MASK3D_##NUNITS (0, NUNITS)); \
+ *(TYPE *) out = v; \
+ }
+
+#define TEST_ALL(T) \
+ T (vnx4qi, 4) \
+ T (vnx8qi, 8) \
+ T (vnx16qi, 16) \
+ T (vnx32qi, 32) \
+ T (vnx64qi, 64) \
+ T (vnx128qi, 128) \
+ T (vnx4hi, 4) \
+ T (vnx8hi, 8) \
+ T (vnx16hi, 16) \
+ T (vnx32hi, 32) \
+ T (vnx64hi, 64) \
+ T (vnx4si, 4) \
+ T (vnx8si, 8) \
+ T (vnx16si, 16) \
+ T (vnx32si, 32) \
+ T (vnx4di, 4) \
+ T (vnx8di, 8) \
+ T (vnx16di, 16) \
+ T (vnx4sf, 4) \
+ T (vnx8sf, 8) \
+ T (vnx16sf, 16) \
+ T (vnx32sf, 32) \
+ T (vnx4df, 4) \
+ T (vnx8df, 8) \
+ T (vnx16df, 16)
+
+TEST_ALL (PERMUTE1)
+TEST_ALL (PERMUTE2)
+TEST_ALL (PERMUTE3)
+TEST_ALL (PERMUTE4)
+TEST_ALL (PERMUTE5)
+TEST_ALL (PERMUTE6)
+
+/* { dg-final { scan-assembler-times "vslideup" 75 } } */
+/* { dg-final { scan-assembler-times "vslidedown" 75 } } */
+/* { dg-final { scan-assembler-not "vrgather" } } */
+/* { dg-final { scan-assembler-not "vmerge" } } */
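For reference, a rough intrinsics-level sketch of the two operations the new
strategy emits (an editorial illustration under the assumption that the RVV
policy intrinsics behave as described; this is not code from the patch):

    #include <riscv_vector.h>

    /* Slideup: OP1 is slid up by CNT; the low CNT elements are taken
       from the destination operand OP0.  */
    vint32m1_t
    slide_up (vint32m1_t op0, vint32m1_t op1, size_t cnt, size_t vl)
    {
      return __riscv_vslideup_vx_i32m1 (op0, op1, cnt, vl);
    }

    /* Slidedown: OP0's high part is slid down to element 0; only LEN
       elements are written and OP1's tail stays undisturbed (the
       tail-undisturbed "_tu" policy variant).  */
    vint32m1_t
    slide_down (vint32m1_t op0, vint32m1_t op1, size_t cnt, size_t len)
    {
      return __riscv_vslidedown_vx_i32m1_tu (op1, op0, cnt, len);
    }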