diff options
author | Jan Hubicka <jh@suse.cz> | 2019-11-20 12:46:41 +0100 |
---|---|---|
committer | Jan Hubicka <hubicka@gcc.gnu.org> | 2019-11-20 11:46:41 +0000 |
commit | 041cb6154cfd31dd249fec8cfcc6833e09e5cd06 (patch) | |
tree | 2fa13516e17fc4666b63160f4d7bb4df367f850c /gcc | |
parent | 140ee00a961fda084c1b4b3f0e7e489a917858f7 (diff) | |
download | gcc-041cb6154cfd31dd249fec8cfcc6833e09e5cd06.zip gcc-041cb6154cfd31dd249fec8cfcc6833e09e5cd06.tar.gz gcc-041cb6154cfd31dd249fec8cfcc6833e09e5cd06.tar.bz2 |
Optimize updating of badness after applying inline
* ipa-inline.c (wrapper_heuristics_may_apply): Break out from ...
(edge_badness): ... here.
(inline_small_functions): Use monotonicity of badness calculation
to avoid redundant updates.
From-SVN: r278496
Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/ChangeLog | 7 | ||||
-rw-r--r-- | gcc/ipa-inline.c | 32 |
2 files changed, 34 insertions, 5 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog index eacaee3..79b8210 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,10 @@ +2019-11-20 Jan Hubicka <jh@suse.cz> + + * ipa-inline.c (wrapper_heuristics_may_apply): Break out from ... + (edge_badness): ... here. + (inline_small_functions): Use monotonicity of badness calculation + to avoid redundant updates. + 2019-11-20 Richard Biener <rguenther@suse.de> * tree-vect-slp.c (vect_analyze_slp_instance): Dump diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c index becea8a..5d8b87a 100644 --- a/gcc/ipa-inline.c +++ b/gcc/ipa-inline.c @@ -1097,6 +1097,17 @@ want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold) return true; } +/* Return true if WHERE of SIZE is a possible candidate for wrapper heuristics + in estimate_edge_badness. */ + +static bool +wrapper_heuristics_may_apply (struct cgraph_node *where, int size) +{ + return size < (DECL_DECLARED_INLINE_P (where->decl) + ? inline_insns_single (where, false) + : inline_insns_auto (where, false)); +} + /* A cost model driving the inlining heuristics in a way so the edges with smallest badness are inlined first. After each inlining is performed the costs of all caller edges of nodes affected are recomputed so the @@ -1227,10 +1238,8 @@ edge_badness (struct cgraph_edge *edge, bool dump) and it is not called once. */ if (!caller_info->single_caller && overall_growth < caller_growth && caller_info->inlinable - && ipa_size_summaries->get (caller)->size - < (DECL_DECLARED_INLINE_P (caller->decl) - ? 
inline_insns_single (caller, false) - : inline_insns_auto (caller, false))) + && wrapper_heuristics_may_apply + (caller, ipa_size_summaries->get (caller)->size)) { if (dump) fprintf (dump_file, @@ -2158,11 +2167,24 @@ inline_small_functions (void) fprintf (dump_file, " Peeling recursion with depth %i\n", depth); gcc_checking_assert (!callee->inlined_to); + + int old_size = ipa_size_summaries->get (where)->size; + sreal old_time = ipa_fn_summaries->get (where)->time; + inline_call (edge, true, &new_indirect_edges, &overall_size, true); reset_edge_caches (edge->callee); add_new_edges_to_heap (&edge_heap, new_indirect_edges); - update_callee_keys (&edge_heap, where, updated_nodes); + /* If caller's size and time increased we do not need to update + all edges because badness is not going to decrease. */ + if (old_size <= ipa_size_summaries->get (where)->size + && old_time <= ipa_fn_summaries->get (where)->time + /* Wrapper penalty may be non-monotonic in this respect. + Fortunately it only affects small functions. */ + && !wrapper_heuristics_may_apply (where, old_size)) + update_callee_keys (&edge_heap, edge->callee, updated_nodes); + else + update_callee_keys (&edge_heap, where, updated_nodes); } where = edge->caller; if (where->inlined_to) |