aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorRichard Sandiford <richard.sandiford@arm.com>2020-01-06 18:00:15 +0000
committerRichard Sandiford <rsandifo@gcc.gnu.org>2020-01-06 18:00:15 +0000
commita0643f028e43aa2a5b09907295ecaadedac2d295 (patch)
treebe2c3cd841d6c76521bace096462f0309b2d060f /gcc
parent8a0ae3c130cd1e6beb0087a6967d33d8203f0dce (diff)
downloadgcc-a0643f028e43aa2a5b09907295ecaadedac2d295.zip
gcc-a0643f028e43aa2a5b09907295ecaadedac2d295.tar.gz
gcc-a0643f028e43aa2a5b09907295ecaadedac2d295.tar.bz2
Require equal shift amounts for IFN_DIV_POW2
Require equal shift amounts for IFN_DIV_POW2

IFN_DIV_POW2 currently requires all elements to be shifted by the
same amount, in a similar way as for WIDEN_LSHIFT_EXPR.  This patch
enforces that when building the SLP tree.

If in future targets want to support IFN_DIV_POW2 without this
restriction, we'll probably need the kind of vector-vector/
vector-scalar split that we already have for normal shifts.

2020-01-06  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* tree-vect-slp.c (vect_build_slp_tree_1): Require all shifts
	in an IFN_DIV_POW2 node to be equal.

gcc/testsuite/
	* gcc.target/aarch64/sve/asrdiv_1.c: Remove trailing %s.
	* gcc.target/aarch64/sve/asrdiv_2.c: New test.
	* gcc.target/aarch64/sve/asrdiv_3.c: Likewise.

From-SVN: r279908
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog5
-rw-r--r--gcc/testsuite/ChangeLog6
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/asrdiv_1.c8
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/asrdiv_2.c19
-rw-r--r--gcc/testsuite/gcc.target/aarch64/sve/asrdiv_3.c19
-rw-r--r--gcc/tree-vect-slp.c30
6 files changed, 74 insertions, 13 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d72076f..80ace59 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
2020-01-06 Richard Sandiford <richard.sandiford@arm.com>
+ * tree-vect-slp.c (vect_build_slp_tree_1): Require all shifts
+ in an IFN_DIV_POW2 node to be equal.
+
+2020-01-06 Richard Sandiford <richard.sandiford@arm.com>
+
* tree-vect-stmts.c (vect_check_load_store_mask): Rename to...
(vect_check_scalar_mask): ...this.
(vectorizable_store, vectorizable_load): Update call accordingly.
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 3a6f6c6..3e778e9 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,11 @@
2020-01-06 Richard Sandiford <richard.sandiford@arm.com>
+ * gcc.target/aarch64/sve/asrdiv_1.c: Remove trailing %s.
+ * gcc.target/aarch64/sve/asrdiv_2.c: New test.
+ * gcc.target/aarch64/sve/asrdiv_3.c: Likewise.
+
+2020-01-06 Richard Sandiford <richard.sandiford@arm.com>
+
* gcc.dg/vect/vect-cond-arith-8.c: New test.
* gcc.target/aarch64/sve/cond_fmul_5.c: Likewise.
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_1.c b/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_1.c
index 615d8b8..16638af 100644
--- a/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_1.c
+++ b/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_1.c
@@ -45,7 +45,7 @@ DIVMOD (64);
/* { dg-final { scan-assembler-times {\tlsl\tz[0-9]+\.d, z[0-9]+\.d, #33\n} 1 } } */
/* { dg-final { scan-assembler-times {\tsub\tz[0-9]+\.d, z[0-9]+\.d, z[0-9]+\.d\n} 1 } } */
-/* { dg-final { scan-assembler-not {\tasr\t%} } } */
-/* { dg-final { scan-assembler-not {\tlsr\t%} } } */
-/* { dg-final { scan-assembler-not {\tcmplt\t%} } } */
-/* { dg-final { scan-assembler-not {\tand\t%} } } */
+/* { dg-final { scan-assembler-not {\tasr\t} } } */
+/* { dg-final { scan-assembler-not {\tlsr\t} } } */
+/* { dg-final { scan-assembler-not {\tcmplt\t} } } */
+/* { dg-final { scan-assembler-not {\tand\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_2.c b/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_2.c
new file mode 100644
index 0000000..73f51df
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_2.c
@@ -0,0 +1,19 @@
+/* { dg-options "-O2 -ftree-vectorize -msve-vector-bits=256" } */
+/* Originally from gcc.dg/vect/pr51583-3.c. */
+
+int a[8], b[8];
+
+void
+f3 (void)
+{
+ a[0] = b[0] / 8;
+ a[1] = b[1] / 4;
+ a[2] = b[2] / 8;
+ a[3] = b[3] / 4;
+ a[4] = b[4] / 8;
+ a[5] = b[5] / 4;
+ a[6] = b[6] / 8;
+ a[7] = b[7] / 4;
+}
+
+/* { dg-final { scan-assembler-not {\tasrd\t} } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_3.c b/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_3.c
new file mode 100644
index 0000000..f340d51
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/asrdiv_3.c
@@ -0,0 +1,19 @@
+/* { dg-options "-O2 -ftree-vectorize -msve-vector-bits=256" } */
+/* Originally from gcc.dg/vect/pr51583-3.c. */
+
+int a[8], b[8];
+
+void
+f3 (void)
+{
+ a[0] = b[0] / 8;
+ a[1] = b[1] / 8;
+ a[2] = b[2] / 8;
+ a[3] = b[3] / 8;
+ a[4] = b[4] / 8;
+ a[5] = b[5] / 8;
+ a[6] = b[6] / 8;
+ a[7] = b[7] / 8;
+}
+
+/* { dg-final { scan-assembler-times {\tasrd\t} 1 } } */
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index e9bd884..9cb724b 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -885,7 +885,8 @@ vect_build_slp_tree_1 (unsigned char *swap,
&& !vect_update_shared_vectype (stmt_info, vectype))
continue;
- if (gcall *call_stmt = dyn_cast <gcall *> (stmt))
+ gcall *call_stmt = dyn_cast <gcall *> (stmt);
+ if (call_stmt)
{
rhs_code = CALL_EXPR;
@@ -971,6 +972,12 @@ vect_build_slp_tree_1 (unsigned char *swap,
need_same_oprnds = true;
first_op1 = gimple_assign_rhs2 (stmt);
}
+ else if (call_stmt
+ && gimple_call_internal_p (call_stmt, IFN_DIV_POW2))
+ {
+ need_same_oprnds = true;
+ first_op1 = gimple_call_arg (call_stmt, 1);
+ }
}
else
{
@@ -1008,15 +1015,20 @@ vect_build_slp_tree_1 (unsigned char *swap,
continue;
}
- if (need_same_oprnds
- && !operand_equal_p (first_op1, gimple_assign_rhs2 (stmt), 0))
+ if (need_same_oprnds)
{
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: different shift "
- "arguments in %G", stmt);
- /* Mismatch. */
- continue;
+ tree other_op1 = (call_stmt
+ ? gimple_call_arg (call_stmt, 1)
+ : gimple_assign_rhs2 (stmt));
+ if (!operand_equal_p (first_op1, other_op1, 0))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: different shift "
+ "arguments in %G", stmt);
+ /* Mismatch. */
+ continue;
+ }
}
if (!load_p && rhs_code == CALL_EXPR)