author		Richard Biener <rguenther@suse.de>	2024-06-26 09:25:27 +0200
committer	Richard Biener <rguenth@gcc.gnu.org>	2024-06-26 14:05:38 +0200
commit		f80db5495d5f8455b3003951727eb6c8dc67d81d (patch)
tree		3d74fbae4cd919ee62ffdd93e7ebd111f585fe90
parent		7a9b535d8abe27abdaa68cdcb22172a666030d06 (diff)
tree-optimization/115652 - adjust insertion gsi for SLP
The following adjusts how SLP computes the insertion location.  In
particular it advances the insert iterator obtained for the found
last_stmt; the vectorizer will later insert stmts _before_ that
iterator.  But we also have the constraint that possibly masked ops
may not be scheduled outside of the loop, and since we do not model
the loop mask in the SLP graph we have to adjust the insertion
location for that explicitly.  The following moves this adjustment to
after the iterator advance, since applying it before the advance is
not compatible with it, as the current GIMPLE_COND exception shows.
The PR is about in-order reduction vectorization, which is also not
happy when last_stmt ends up being the very first stmt of the loop
header.
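
For illustration only, this is not the testcase from the PR but a
sketch of the kind of loop involved: an in-order floating-point
reduction, i.e. one compiled without -ffast-math so the additions must
stay in source order, in a loop that a target with loop masking (for
example aarch64 SVE with partial vectors) vectorizes as a fully-masked
loop:

  /* Hypothetical reduced example in the spirit of PR115652 (not taken
     from the bug report).  Without -ffast-math the reduction has to be
     vectorized in-order.  */
  double
  foo (double *x, int n)
  {
    double sum = 0.0;
    for (int i = 0; i < n; ++i)
      sum += x[i];
    return sum;
  }
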
PR tree-optimization/115652
* tree-vect-slp.cc (vect_schedule_slp_node): Advance the
iterator based on last_stmt only for vector defs.
 gcc/tree-vect-slp.cc | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index b47b7e8c..1f5b3fc 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -9629,16 +9629,6 @@ vect_schedule_slp_node (vec_info *vinfo,
   /* Emit other stmts after the children vectorized defs which is
      earliest possible.  */
   gimple *last_stmt = NULL;
-  if (auto loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
-    if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
-        || LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo))
-      {
-        /* But avoid scheduling internal defs outside of the loop when
-           we might have only implicitly tracked loop mask/len defs.  */
-        gimple_stmt_iterator si
-          = gsi_after_labels (LOOP_VINFO_LOOP (loop_vinfo)->header);
-        last_stmt = *si;
-      }
   bool seen_vector_def = false;
   FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
     if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
@@ -9747,12 +9737,19 @@ vect_schedule_slp_node (vec_info *vinfo,
       else
        {
          si = gsi_for_stmt (last_stmt);
-         /* When we're getting gsi_after_labels from the starting
-            condition of a fully masked/len loop avoid insertion
-            after a GIMPLE_COND that can appear as the only header
-            stmt with early break vectorization.  */
-         if (gimple_code (last_stmt) != GIMPLE_COND)
-           gsi_next (&si);
+         gsi_next (&si);
+
+         /* Avoid scheduling internal defs outside of the loop when
+            we might have only implicitly tracked loop mask/len defs.  */
+         if (auto loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
+           if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
+               || LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo))
+             {
+               gimple_stmt_iterator si2
+                 = gsi_after_labels (LOOP_VINFO_LOOP (loop_vinfo)->header);
+               if (vect_stmt_dominates_stmt_p (last_stmt, *si2))
+                 si = si2;
+             }
        }
     }
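
As a standalone sketch of what the new insertion-point computation
amounts to (the names below, dominates and choose_insertion_point, are
made up for illustration and none of this is GCC code): step past
last_stmt first, and only fall back to the loop-header position when
last_stmt dominates it, i.e. when inserting right after last_stmt
would place possibly masked/len ops outside the loop:

  // Minimal model of the patched logic in vect_schedule_slp_node.
  #include <cassert>
  #include <cstddef>

  // Stand-in for vect_stmt_dominates_stmt_p: in this straight-line toy
  // every earlier statement dominates every later one.
  static bool
  dominates (std::size_t a, std::size_t b)
  {
    return a < b;
  }

  // Return the index *before which* the vectorized stmts get emitted:
  // advance past last_stmt (the unconditional gsi_next of the patch),
  // then clamp to the loop-header position only when last_stmt
  // dominates it, so the ops stay inside the loop.
  static std::size_t
  choose_insertion_point (std::size_t last_stmt, std::size_t header_pos,
                          bool fully_masked_or_len)
  {
    std::size_t si = last_stmt + 1;
    if (fully_masked_or_len && dominates (last_stmt, header_pos))
      si = header_pos;
    return si;
  }

  int
  main ()
  {
    // last_stmt is inside the loop (header position 2): insert right
    // after it, no clamping.
    assert (choose_insertion_point (4, 2, true) == 5);
    // last_stmt is before the loop (it dominates the header position):
    // clamp to the header so nothing is emitted outside the loop.
    assert (choose_insertion_point (0, 2, true) == 2);
    return 0;
  }

Before the patch the header position was instead used to pre-seed
last_stmt ahead of the advance, which is what made the removed
GIMPLE_COND special case necessary in the first place.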