author     Richard Biener <rguenther@suse.de>     2024-07-25 13:39:49 +0200
committer  Thomas Koenig <tkoenig@gcc.gnu.org>    2024-07-28 19:05:57 +0200
commit     44334ba54905c5884123178c6faa8cdbc05e98c5 (patch)
tree       6c4ff47dede8a4bc16e73935769d82515e8831f3
parent     f22677a48b644e15304cf28f79b156c127f3df81 (diff)
tree-optimization/116083 - improve behavior when SLP discovery limit is reached
The following avoids some useless work when the SLP discovery limit
is reached, for example allocating a node to cache the failure and
starting discovery on split store groups when analyzing BBs.  It does
not address the issue in the PR, which is a gratuitous budget for
discovery when the store group size approaches the number of overall
statements.

	PR tree-optimization/116083
	* tree-vect-slp.cc (vect_build_slp_tree): Do not allocate
	a discovery fail node when we reached the discovery limit.
	(vect_build_slp_instance): Terminate early when the discovery
	limit is reached.
 gcc/tree-vect-slp.cc | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)
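As a side note before the patch itself: the discovery budget is a simple
shared work counter.  Below is a minimal standalone C++ sketch of the
pattern the patch rearranges.  Every name in it (Node, Key, Cache,
discover, build_instance) is illustrative only, not GCC's API; the real
code lives in gcc/tree-vect-slp.cc.  It shows the two points of the
patch: the cache stub is allocated only after the budget check has
passed, so hitting the limit no longer allocates a node just to record
the failure, and the top-level builder returns immediately once the
budget is exhausted.

// Minimal standalone sketch of the discovery-budget pattern this patch
// rearranges.  All names are hypothetical stand-ins, not GCC code.
#include <cstdio>
#include <map>
#include <vector>

struct Node {};
using Key = std::vector<int>;          // stands in for the stmt vector
using Cache = std::map<Key, Node *>;   // stands in for bst_map

// Recursive discovery drawing from a shared budget.  After the patch,
// the cache stub is allocated only once the budget check has passed.
Node *discover (const Key &stmts, unsigned *limit, Cache &cache)
{
  if (auto it = cache.find (stmts); it != cache.end ())
    return it->second;

  if (stmts.size () > 1)               // single-lane work is free
    {
      if (*limit == 0)
	return nullptr;                // fail without caching a stub
      --*limit;
    }

  Node *res = new Node;
  cache[stmts] = res;                  // seed the cache for backedges
  // ... recurse into operands, each call drawing from *limit ...
  return res;
}

// Top-level builder: bail out before doing any setup once the budget
// is exhausted, mirroring the early return added by the patch.
bool build_instance (const Key &stmts, unsigned *limit, Cache &cache)
{
  if (*limit == 0)                     // terminate early
    return false;
  return discover (stmts, limit, cache) != nullptr;
}

int main ()
{
  Cache cache;
  unsigned limit = 2;
  build_instance ({1, 2}, &limit, cache);    // consumes one unit of budget
  build_instance ({3, 4}, &limit, cache);    // consumes the second
  printf ("remaining budget: %u\n", limit);  // prints 0
  printf ("third attempt: %d\n", build_instance ({5, 6}, &limit, cache));
}

With limit = 2, the third call returns false without touching the cache
at all, which is the cheap early exit the patch adds to
vect_build_slp_instance.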
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 55ae496..5f0d9e5 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -1751,13 +1751,6 @@ vect_build_slp_tree (vec_info *vinfo,
       return NULL;
     }
 
-  /* Seed the bst_map with a stub node to be filled by vect_build_slp_tree_2
-     so we can pick up backedge destinations during discovery.  */
-  slp_tree res = new _slp_tree;
-  SLP_TREE_DEF_TYPE (res) = vect_internal_def;
-  SLP_TREE_SCALAR_STMTS (res) = stmts;
-  bst_map->put (stmts.copy (), res);
-
   /* Single-lane SLP doesn't have the chance of run-away, do not account
      it to the limit.  */
   if (stmts.length () > 1)
@@ -1767,18 +1760,19 @@ vect_build_slp_tree (vec_info *vinfo,
           if (dump_enabled_p ())
             dump_printf_loc (MSG_NOTE, vect_location,
                              "SLP discovery limit exceeded\n");
-          /* Mark the node invalid so we can detect those when still in use
-             as backedge destinations.  */
-          SLP_TREE_SCALAR_STMTS (res) = vNULL;
-          SLP_TREE_DEF_TYPE (res) = vect_uninitialized_def;
-          res->failed = XNEWVEC (bool, group_size);
-          memset (res->failed, 0, sizeof (bool) * group_size);
           memset (matches, 0, sizeof (bool) * group_size);
           return NULL;
         }
       --*limit;
     }
 
+  /* Seed the bst_map with a stub node to be filled by vect_build_slp_tree_2
+     so we can pick up backedge destinations during discovery.  */
+  slp_tree res = new _slp_tree;
+  SLP_TREE_DEF_TYPE (res) = vect_internal_def;
+  SLP_TREE_SCALAR_STMTS (res) = stmts;
+  bst_map->put (stmts.copy (), res);
+
   if (dump_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
                      "starting SLP discovery for node %p\n", (void *) res);
@@ -3363,6 +3357,10 @@ vect_build_slp_instance (vec_info *vinfo,
                          /* ???  We need stmt_info for group splitting.  */
                          stmt_vec_info stmt_info_)
 {
+  /* If there's no budget left bail out early.  */
+  if (*limit == 0)
+    return false;
+
   if (kind == slp_inst_kind_ctor)
     {
       if (dump_enabled_p ())
@@ -3520,7 +3518,7 @@ vect_build_slp_instance (vec_info *vinfo,
   stmt_vec_info stmt_info = stmt_info_;
 
   /* Try to break the group up into pieces.  */
-  if (kind == slp_inst_kind_store)
+  if (*limit > 0 && kind == slp_inst_kind_store)
     {
       /* ???  We could delay all the actual splitting of store-groups
          until after SLP discovery of the original group completed.
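The *limit > 0 guard on the store-splitting path fits the same picture.
Continuing the sketch from above (same hypothetical Key, Cache and
build_instance; the real splitting logic in vect_build_slp_instance is
considerably more involved): once the budget is gone, re-running
discovery on halves of a failed store group can only fail the same way,
so the guard skips the split-and-retry entirely.

// Illustrative continuation of the earlier sketch, not GCC code.
// Once *limit reaches zero, splitting a failed group and retrying
// discovery on the pieces is pointless, so the guard skips it.
bool build_with_splitting (const Key &group, unsigned *limit, Cache &cache)
{
  if (build_instance (group, limit, cache))
    return true;
  if (*limit > 0 && group.size () > 1)     // the guard added by the patch
    {
      Key lo (group.begin (), group.begin () + group.size () / 2);
      Key hi (group.begin () + group.size () / 2, group.end ());
      bool ok_lo = build_with_splitting (lo, limit, cache);
      bool ok_hi = build_with_splitting (hi, limit, cache);
      return ok_lo && ok_hi;
    }
  return false;
}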