path: root/gcc/tree-vect-loop.c
author     Richard Biener <rguenther@suse.de>   2020-08-26 15:12:17 +0200
committer  Richard Biener <rguenther@suse.de>   2020-08-26 16:02:32 +0200
commit     2130efe6ac7beba72d289e3dd145daa10aeaed54 (patch)
tree       9f9f63553ccafec89b413aea2df46de595dbf809 /gcc/tree-vect-loop.c
parent     3eefb302d2bd8502cb3d8fe44e672b11092ccaf6 (diff)
tree-optimization/96698 - fix ICE when vectorizing nested cycles
This fixes vectorized PHI latch edge updating and delays it until all of
the loop is code generated, to deal with the case where the latch def is
a PHI in the same block.

2020-08-26  Richard Biener  <rguenther@suse.de>

        PR tree-optimization/96698
        * tree-vectorizer.h (loop_vec_info::reduc_latch_defs): New.
        (loop_vec_info::reduc_latch_slp_defs): Likewise.
        * tree-vect-stmts.c (vect_transform_stmt): Only record stmts to
        update PHI latches from, perform the update ...
        * tree-vect-loop.c (vect_transform_loop): ... here after
        vectorizing those PHIs.
        (info_for_reduction): Properly handle non-reduction PHIs.

        * gcc.dg/vect/pr96698.c: New testcase.
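
For orientation, a "nested cycle" in the vectorizer means a scalar value that
is carried around an inner loop's latch while the enclosing outer loop is the
one being vectorized.  The sketch below only illustrates that shape; it is not
the contents of the new gcc.dg/vect/pr96698.c testcase, and the function name
and bounds are made up.

/* Illustrative sketch only (not the actual pr96698.c testcase): the inner
   loop carries X around its latch, so the PHI for X is a nested cycle when
   the outer loop over I is vectorized.  */
void
foo (int *out, const int *in, int n, int m)
{
  for (int i = 0; i < n; ++i)
    {
      int x = in[i];
      for (int j = 0; j < m; ++j)
        x = x + 3;          /* latch def of the nested cycle */
      out[i] = x;
    }
}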
Diffstat (limited to 'gcc/tree-vect-loop.c')
-rw-r--r--  gcc/tree-vect-loop.c  35
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index a92813e..50abb2b 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -4646,7 +4646,8 @@ info_for_reduction (vec_info *vinfo, stmt_vec_info stmt_info)
 {
   stmt_info = vect_orig_stmt (stmt_info);
   gcc_assert (STMT_VINFO_REDUC_DEF (stmt_info));
-  if (!is_a <gphi *> (stmt_info->stmt))
+  if (!is_a <gphi *> (stmt_info->stmt)
+      || !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
     stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
   gphi *phi = as_a <gphi *> (stmt_info->stmt);
   if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
@@ -9031,6 +9032,38 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
             }
         }

+      /* Fill in backedge defs of reductions.  */
+      for (unsigned i = 0; i < loop_vinfo->reduc_latch_defs.length (); ++i)
+        {
+          stmt_vec_info stmt_info = loop_vinfo->reduc_latch_defs[i];
+          stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
+          vec<gimple *> &phi_info
+            = STMT_VINFO_VEC_STMTS (STMT_VINFO_REDUC_DEF (orig_stmt_info));
+          vec<gimple *> &vec_stmt
+            = STMT_VINFO_VEC_STMTS (stmt_info);
+          gcc_assert (phi_info.length () == vec_stmt.length ());
+          gphi *phi
+            = dyn_cast <gphi *> (STMT_VINFO_REDUC_DEF (orig_stmt_info)->stmt);
+          edge e = loop_latch_edge (gimple_bb (phi_info[0])->loop_father);
+          for (unsigned j = 0; j < phi_info.length (); ++j)
+            add_phi_arg (as_a <gphi *> (phi_info[j]),
+                         gimple_get_lhs (vec_stmt[j]), e,
+                         gimple_phi_arg_location (phi, e->dest_idx));
+        }
+      for (unsigned i = 0; i < loop_vinfo->reduc_latch_slp_defs.length (); ++i)
+        {
+          slp_tree slp_node = loop_vinfo->reduc_latch_slp_defs[i].first;
+          slp_tree phi_node = loop_vinfo->reduc_latch_slp_defs[i].second;
+          gphi *phi = as_a <gphi *> (SLP_TREE_SCALAR_STMTS (phi_node)[0]->stmt);
+          edge e = loop_latch_edge (gimple_bb (phi)->loop_father);
+          gcc_assert (SLP_TREE_VEC_STMTS (phi_node).length ()
+                      == SLP_TREE_VEC_STMTS (slp_node).length ());
+          for (unsigned j = 0; j < SLP_TREE_VEC_STMTS (phi_node).length (); ++j)
+            add_phi_arg (as_a <gphi *> (SLP_TREE_VEC_STMTS (phi_node)[j]),
+                         vect_get_slp_vect_def (slp_node, j),
+                         e, gimple_phi_arg_location (phi, e->dest_idx));
+        }
+
       /* Stub out scalar statements that must not survive vectorization.
          Doing this here helps with grouped statements, or statements that
          are involved in patterns.  */