author     Martin Jambor <mjambor@suse.cz>  2023-03-14 18:53:16 +0100
committer  Martin Jambor <mjambor@suse.cz>  2023-03-14 18:56:23 +0100
commit     1526ecd739fc6a13329abdcbdbf7c2df57c22177 (patch)
tree       3d5f65303e9ef24bd281af3f6dd5937c2397a7c2 /gcc
parent     68ba253bda74d6c6e77726d98184a6faee5e7337 (diff)
ipa-cp: Improve updating behavior when profile counts have gone bad
Looking into the behavior of profile count updating in PR 107925, I noticed
that an option not considered possible was actually happening, and - with the
guesswork in place to distribute unexplained counts - it simply can happen.
Currently it is handled by dropping the counts to local estimated zero, whereas
it is probably better to leave the counts as they are but drop the category to
GUESSED_GLOBAL0 - which is what profile_count::combine_with_ipa_count does in a
similar case (or so I hope :-).

gcc/ChangeLog:

2023-02-20  Martin Jambor  <mjambor@suse.cz>

	PR ipa/107925
	* ipa-cp.cc (update_profiling_info): Drop counts of orig_node to
	global0 instead of zeroing when it does not have as many counts as
	it should.
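To make the intent of the fix easier to see, here is a small self-contained C++
sketch of the two strategies: zeroing a count that has gone bad versus keeping
its value but downgrading its category. The toy_count type, its members and the
quality names below are simplified stand-ins for illustration only, not GCC's
actual profile_count interface.

/* Toy model only: a count value paired with a quality category,
   loosely mimicking the idea behind GCC's profile counts.  */
#include <cstdint>
#include <iostream>

enum class count_quality { PRECISE, GUESSED_GLOBAL0, GUESSED_LOCAL };

struct toy_count
{
  uint64_t value;
  count_quality quality;

  /* Old handling: discard the measured value entirely.  */
  static toy_count zero_guessed_local ()
  { return { 0, count_quality::GUESSED_LOCAL }; }

  /* New handling: keep the value but mark it as no longer reliable
     across the whole program.  */
  toy_count global0 () const
  { return { value, count_quality::GUESSED_GLOBAL0 }; }
};

int main ()
{
  toy_count orig_node_count = { 1000, count_quality::PRECISE };
  toy_count new_sum = { 1200, count_quality::PRECISE };  /* counts went bad */

  toy_count remainder;
  if (new_sum.value > orig_node_count.value)
    /* Keep the 1000 but lower its category instead of zeroing it.  */
    remainder = orig_node_count.global0 ();
  else
    remainder = { orig_node_count.value - new_sum.value,
                  count_quality::PRECISE };

  std::cout << "remainder: " << remainder.value << "\n";
  return 0;
}

In the real patch the same idea is expressed with profile_count::global0 (),
applied both to the remainder and to the outgoing edge counts of orig_node, as
the diff below shows.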
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ipa-cp.cc  29
1 file changed, 22 insertions, 7 deletions
diff --git a/gcc/ipa-cp.cc b/gcc/ipa-cp.cc
index 5a6b41c..6477bb8 100644
--- a/gcc/ipa-cp.cc
+++ b/gcc/ipa-cp.cc
@@ -4969,10 +4969,20 @@ update_profiling_info (struct cgraph_node *orig_node,
                                                     false);
       new_sum = stats.count_sum;

+      bool orig_edges_processed = false;
       if (new_sum > orig_node_count)
     {
-      /* TODO: Perhaps this should be gcc_unreachable ()?  */
-      remainder = profile_count::zero ().guessed_local ();
+      /* TODO: Profile has already gone astray, keep what we have but lower it
+         to global0 category.  */
+      remainder = orig_node->count.global0 ();
+
+      for (cgraph_edge *cs = orig_node->callees; cs; cs = cs->next_callee)
+        cs->count = cs->count.global0 ();
+      for (cgraph_edge *cs = orig_node->indirect_calls;
+           cs;
+           cs = cs->next_callee)
+        cs->count = cs->count.global0 ();
+      orig_edges_processed = true;
     }
   else if (stats.rec_count_sum.nonzero_p ())
     {
@@ -5070,11 +5080,16 @@ update_profiling_info (struct cgraph_node *orig_node,
   for (cgraph_edge *cs = new_node->indirect_calls; cs; cs = cs->next_callee)
     cs->count = cs->count.apply_scale (new_sum, orig_new_node_count);

-  profile_count::adjust_for_ipa_scaling (&remainder, &orig_node_count);
-  for (cgraph_edge *cs = orig_node->callees; cs; cs = cs->next_callee)
-    cs->count = cs->count.apply_scale (remainder, orig_node_count);
-  for (cgraph_edge *cs = orig_node->indirect_calls; cs; cs = cs->next_callee)
-    cs->count = cs->count.apply_scale (remainder, orig_node_count);
+  if (!orig_edges_processed)
+    {
+      profile_count::adjust_for_ipa_scaling (&remainder, &orig_node_count);
+      for (cgraph_edge *cs = orig_node->callees; cs; cs = cs->next_callee)
+        cs->count = cs->count.apply_scale (remainder, orig_node_count);
+      for (cgraph_edge *cs = orig_node->indirect_calls;
+           cs;
+           cs = cs->next_callee)
+        cs->count = cs->count.apply_scale (remainder, orig_node_count);
+    }

   if (dump_file)
     {