Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                        11
-rw-r--r--  gcc/testsuite/ChangeLog               5
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr67790.c  40
-rw-r--r--  gcc/tree-vect-loop.c                 68
4 files changed, 77 insertions(+), 47 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f700f76..910a178 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,14 @@
+2015-11-18 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/67790
+ * tree-vect-loop.c (vect_is_simple_reduction_1): Remove
+ IL rewrite for MINUS_EXPR reductions, rename back to ...
+ (vect_is_simple_reduction): ... this, removing the wrapper.
+ (vect_force_simple_reduction): Adjust.
+ (vectorizable_reduction): Adjust reduc_index for MINUS_EXPR
+ reductions and make use of reduc_index in all places. For
+ the final reduction of MINUS_EXPR use PLUS_EXPR.
+
2015-11-18 Alan Modra <amodra@gmail.com>

 * configure.ac (POWERPC64_TOC_POINTER_ALIGNMENT): Pass -z norelro
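
[Editor's note, not part of the commit: before this patch, vect_is_simple_reduction_1 rewrote a "res -= x[i]" reduction into "res += -x[i]" in the IL so that the rest of the reduction code only ever saw PLUS_EXPR. The patch drops that rewrite, keeps the MINUS_EXPR statement as-is, treats its first operand as the reduction variable (reduc_index 0), and uses PLUS_EXPR only for the final cross-lane reduction in the epilogue. A minimal stand-alone C sketch of why adding up the per-lane partial results is still correct for a subtraction reduction; the array contents, initial value and lane count are made up for illustration:

#include <stdio.h>

#define N 256
#define LANES 4

int
main (void)
{
  int x[N];
  for (int i = 0; i < N; i++)
    x[i] = i - 3;

  /* Scalar reference: a plain "res -= x[i]" reduction.  */
  int res_scalar = 100;
  for (int i = 0; i < N; i++)
    res_scalar -= x[i];

  /* Simulated vectorization: each lane keeps subtracting its own
     elements (the loop body still uses a subtraction); lane 0 carries
     the initial value, the other lanes start at 0.  */
  int lane[LANES] = { 100, 0, 0, 0 };
  for (int i = 0; i < N; i += LANES)
    for (int l = 0; l < LANES; l++)
      lane[l] -= x[i + l];

  /* Epilogue: the partial results are combined with an addition.  */
  int res_vector = 0;
  for (int l = 0; l < LANES; l++)
    res_vector += lane[l];

  printf ("%d %d\n", res_scalar, res_vector);
  return res_scalar != res_vector;
}

The new gcc.dg/vect/pr67790.c testcase below exercises this pattern with two "-=" reductions side by side, which is why the dg-final line expects them to be vectorized with SLP.]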
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 0c7900e..5be70e2 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,8 @@
+2015-11-18 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/67790
+ * gcc.dg/vect/pr67790.c: New testcase.
+
2015-11-17 David Edelsohn <dje.gcc@gmail.com>

 * g++.dg/cpp1y/pr58708.C: Define wfoo based on sizeof wchar_t.
diff --git a/gcc/testsuite/gcc.dg/vect/pr67790.c b/gcc/testsuite/gcc.dg/vect/pr67790.c
new file mode 100644
index 0000000..5e2d506
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr67790.c
@@ -0,0 +1,40 @@
+/* { dg-require-effective-target vect_int } */
+
+#include "tree-vect.h"
+
+struct {
+ int x_advance;
+ int y_advance;
+} a[256];
+
+int b, c;
+
+void __attribute__((noinline,noclone)) fn1()
+{
+ for (int i = 0; i < 256; i++)
+ {
+ c -= a[i].x_advance;
+ b -= a[i].y_advance;
+ }
+}
+
+int main()
+{
+ check_vect ();
+
+ for (int i = 0; i < 256; ++i)
+ {
+ a[i].x_advance = i;
+ a[i].y_advance = -i + 3;
+ __asm__ volatile ("" : : : "memory");
+ }
+
+ fn1();
+
+ if (c != -32640 || b != 31872)
+ abort ();
+
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump "vectorizing stmts using SLP" "vect" } } */
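
[Editor's note, not part of the commit: a quick check of the expected values. fn1 subtracts x_advance = i for i = 0..255 from c, so c = -(255*256/2) = -32640; it subtracts y_advance = 3 - i from b, so b = -(3*256 - 32640) = 32640 - 768 = 31872. The __asm__ volatile memory clobber in the initialization loop presumably keeps the compiler from constant-folding those values before fn1 runs.]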
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 80937ec..c3dbfd3 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -2468,16 +2468,13 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
if (a[i] < val)
ret_val = a[i];
- If MODIFY is true it tries also to rework the code in-place to enable
- detection of more reduction patterns. For the time being we rewrite
- "res -= RHS" into "rhs += -RHS" when it seems worthwhile.
*/
static gimple *
-vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple *phi,
- bool check_reduction, bool *double_reduc,
- bool modify, bool need_wrapping_integral_overflow,
- enum vect_reduction_type *v_reduc_type)
+vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
+ bool check_reduction, bool *double_reduc,
+ bool need_wrapping_integral_overflow,
+ enum vect_reduction_type *v_reduc_type)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
@@ -2634,7 +2631,6 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple *phi,
gimple instruction for the first simple tests and only do this
if we're allowed to change code at all. */
if (code == MINUS_EXPR
- && modify
&& (op1 = gimple_assign_rhs1 (def_stmt))
&& TREE_CODE (op1) == SSA_NAME
&& SSA_NAME_DEF_STMT (op1) == phi)
@@ -2791,23 +2787,6 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple *phi,
}
}
- /* If we detected "res -= x[i]" earlier, rewrite it into
- "res += -x[i]" now. If this turns out to be useless reassoc
- will clean it up again. */
- if (orig_code == MINUS_EXPR)
- {
- tree rhs = gimple_assign_rhs2 (def_stmt);
- tree negrhs = make_ssa_name (TREE_TYPE (rhs));
- gimple *negate_stmt = gimple_build_assign (negrhs, NEGATE_EXPR, rhs);
- gimple_stmt_iterator gsi = gsi_for_stmt (def_stmt);
- set_vinfo_for_stmt (negate_stmt, new_stmt_vec_info (negate_stmt,
- loop_info));
- gsi_insert_before (&gsi, negate_stmt, GSI_NEW_STMT);
- gimple_assign_set_rhs2 (def_stmt, negrhs);
- gimple_assign_set_rhs_code (def_stmt, PLUS_EXPR);
- update_stmt (def_stmt);
- }
-
/* Reduction is safe. We're dealing with one of the following:
1) integer arithmetic and no trapv
2) floating point arithmetic, and special flags permit this optimization
@@ -2863,7 +2842,8 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple *phi,
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def2)))))))
{
- if (check_reduction)
+ if (check_reduction
+ && orig_code != MINUS_EXPR)
{
if (code == COND_EXPR)
{
@@ -2915,21 +2895,6 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple *phi,
return NULL;
}
-/* Wrapper around vect_is_simple_reduction_1, that won't modify code
- in-place. Arguments as there. */
-
-static gimple *
-vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
- bool check_reduction, bool *double_reduc,
- bool need_wrapping_integral_overflow,
- enum vect_reduction_type *v_reduc_type)
-{
- return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
- double_reduc, false,
- need_wrapping_integral_overflow,
- v_reduc_type);
-}
-
/* Wrapper around vect_is_simple_reduction_1, which will modify code
in-place if it enables detection of more reductions. Arguments
as there. */
@@ -2940,10 +2905,10 @@ vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
bool need_wrapping_integral_overflow)
{
enum vect_reduction_type v_reduc_type;
- return vect_is_simple_reduction_1 (loop_info, phi, check_reduction,
- double_reduc, true,
- need_wrapping_integral_overflow,
- &v_reduc_type);
+ return vect_is_simple_reduction (loop_info, phi, check_reduction,
+ double_reduc,
+ need_wrapping_integral_overflow,
+ &v_reduc_type);
}
/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
@@ -5398,6 +5363,8 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
}
/* The default is that the reduction variable is the last in statement. */
int reduc_index = op_type - 1;
+ if (code == MINUS_EXPR)
+ reduc_index = 0;
if (code == COND_EXPR && slp_node)
return false;
@@ -5417,8 +5384,11 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
The last use is the reduction variable. In case of nested cycle this
assumption is not true: we use reduc_index to record the index of the
reduction variable. */
- for (i = 0; i < op_type - 1; i++)
+ for (i = 0; i < op_type; i++)
{
+ if (i == reduc_index)
+ continue;
+
/* The condition of COND_EXPR is checked in vectorizable_condition(). */
if (i == 0 && code == COND_EXPR)
continue;
@@ -5454,7 +5424,8 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
}
}
- is_simple_use = vect_is_simple_use (ops[i], loop_vinfo, &def_stmt, &dt, &tem);
+ is_simple_use = vect_is_simple_use (ops[reduc_index], loop_vinfo,
+ &def_stmt, &dt, &tem);
if (!vectype_in)
vectype_in = tem;
gcc_assert (is_simple_use);
@@ -5625,6 +5596,9 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
the vector code inside the loop can be used for the epilog code. */
orig_code = code;
+ if (code == MINUS_EXPR)
+ orig_code = PLUS_EXPR;
+
/* For simple condition reductions, replace with the actual expression
we want to base our reduction around. */
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
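
[Editor's note, not part of the commit, summarizing the tree-vect-loop.c hunks above: because MINUS_EXPR is not commutative, detection no longer swaps the operands of a "res -= x[i]" statement (the new "orig_code != MINUS_EXPR" guard), so the reduction variable stays in operand position 0. vectorizable_reduction therefore forces reduc_index to 0 for MINUS_EXPR, skips that operand when checking the remaining uses, and only switches to PLUS_EXPR for the epilogue that combines the per-lane partial results. Roughly, in GIMPLE terms, the statement that used to be rewritten as

  _t = -x_i;
  res_2 = res_1 + _t;    /* PLUS_EXPR, reduction operand last */

is now kept as

  res_2 = res_1 - x_i;   /* MINUS_EXPR, reduction operand first */

with the addition appearing only in the final reduction, as illustrated by the sketch after the ChangeLog above.]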