| author | Jan Hubicka <jh@suse.cz> | 2007-02-09 23:49:30 +0100 |
|---|---|---|
| committer | Jan Hubicka <hubicka@gcc.gnu.org> | 2007-02-09 22:49:30 +0000 |
| commit | 45a80bb9e800503749c797d4c57a4b6829ca9bf7 (patch) | |
| tree | dd0cf0d6f5ee79e534f85b6bf4b3b2f75372bd8c /gcc/ipa-inline.c | |
| parent | f2477b062c9c639e5354ef705022a47dcb61e213 (diff) | |
Makefile.in (passes.o, [...]): Add dependencies.
* Makefile.in (passes.o, ipa-inline.o): Add dependencies.
* cgraphbuild.c (build_cgraph_edges): Compute frequencies.
(rebuild_cgraph_edges): Likewise.
* cgraph.c (cgraph_set_call_stmt): Add new argument frequency.
(dump_cgraph_node): Dump frequencies.
(cgraph_clone_edge): Add frequency scales.
(cgraph_clone_node): Add frequency.
* cgraph.h (cgraph_edge): Add frequency argument.
(CGRAPH_FREQ_BASE, CGRAPH_FREQ_MAX): New constants.
(cgraph_create_edge, cgraph_clone_edge, cgraph_clone_node): Update.
* tree-pass.h (TODO_rebuild_frequencies): New constant.
* cgraphunit.c (verify_cgraph_node): Verify frequencies.
(cgraph_copy_node_for_versioning): Update call of cgraph_clone_edge.
(save_inline_function_body): Likewise.
* ipa-inline.c: Include rtl.h.
(cgraph_clone_inlined_nodes): Update call of cgraph_clone_node.
(cgraph_edge_badness): Use frequencies.
(cgraph_decide_recursive_inlining): Update cloning.
(cgraph_decide_inlining_of_small_functions): Dump frequency.
* predict.c (estimate_bb_frequencies): Export.
* predict.h (estimate_bb_frequencies): Declare.
* tree-inline.c (copy_bb): Watch overflows.
(expand_call_inline): Update call of cgraph_create_edge.
(optimize_inline_calls): Use TODO flags to update frequencies.
* passes.c: Include predict.h.
(init_optimization_passes): Move profile ahead.
(execute_function_todo): Handle TODO_rebuild_frequencies.
From-SVN: r121780
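The core of this patch is the rewritten cgraph_edge_badness, which now picks one of three priority schemes depending on what profile data is available: feedback counts, guessed branch frequencies, or loop nesting alone. The standalone C sketch below mirrors that ordering logic so it can be run outside GCC; the edge struct, the sample values, and the ceil_log2 helper are simplified stand-ins for GCC's internals, and the branches are chained with else-if for readability rather than copying the patch's exact control flow.

```c
/* Minimal sketch of the badness ordering introduced by this patch:
   lower badness is inlined first.  All types and values here are
   illustrative stand-ins, not GCC's real call-graph structures.  */
#include <stdio.h>
#include <limits.h>

#define CGRAPH_FREQ_BASE 1000   /* fixed-point representation of 1.0 */

struct edge
{
  long long count;      /* profile-feedback execution count */
  int frequency;        /* estimated frequency, CGRAPH_FREQ_BASE = once */
  int loop_nest;        /* loop depth of the call site */
  int growth;           /* caller size growth after inlining */
  int callee_growth;    /* estimated growth after inlining all callees */
};

/* Smallest l with 2^l >= x; stand-in for GCC's ceil_log2.  */
static int
ceil_log2 (unsigned int x)
{
  int l = 0;
  while ((1u << l) < x)
    l++;
  return l;
}

static int
edge_badness (struct edge *e, long long max_count, int guess_branch_prob)
{
  int badness;

  /* Always prefer inlining that saves code size.  */
  if (e->growth <= 0)
    badness = INT_MIN - e->growth;
  /* Profile feedback: -(#calls / growth), maximizing executed calls.  */
  else if (max_count)
    badness = (int) ((double) e->count * INT_MIN / max_count) / e->growth;
  /* Guessed frequencies: growth / frequency, biased by callee growth.  */
  else if (guess_branch_prob)
    {
      int div = e->frequency * 100 / CGRAPH_FREQ_BASE;

      badness = e->growth * 256;
      /* Compress the divisor logarithmically so frequent calls cannot
         collapse the whole priority range.  */
      if (div > 256)
        div = 256 + ceil_log2 (div) - 8;
      if (div < 1)
        div = 1;
      if (badness > 0)
        badness /= div;
      badness += e->callee_growth;
    }
  /* No profile at all: fall back to loop nest and overall growth.  */
  else
    {
      int nest = e->loop_nest < 8 ? e->loop_nest : 8;

      badness = e->callee_growth * 256;
      if (badness > 0)
        badness >>= nest;
      else
        badness <<= nest;
    }
  return badness;
}

int
main (void)
{
  /* Two identical call sites except for their estimated frequency.  */
  struct edge hot = { 0, 5 * CGRAPH_FREQ_BASE, 1, 20, 40 };
  struct edge cold = { 0, CGRAPH_FREQ_BASE / 10, 1, 20, 40 };

  printf ("hot %d, cold %d\n",
          edge_badness (&hot, 0, 1), edge_badness (&cold, 0, 1));
  return 0;
}
```

With these made-up numbers the hot edge gets the smaller badness (59 vs. 552 here), so the fibheap-driven greedy inliner would pull it first; that is the behavioural change the frequency branch buys over the old loop-nest-only heuristic.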
Diffstat (limited to 'gcc/ipa-inline.c')
-rw-r--r-- | gcc/ipa-inline.c | 93
1 file changed, 63 insertions(+), 30 deletions(-)
```diff
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 736a3ae..50b02fb 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -139,6 +139,7 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #include "coverage.h"
 #include "ggc.h"
 #include "tree-flow.h"
+#include "rtl.h"
 
 /* Mode incremental inliner operate on:
 
@@ -215,7 +216,7 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate, bool update_o
   else
     {
       struct cgraph_node *n;
-      n = cgraph_clone_node (e->callee, e->count, e->loop_nest,
+      n = cgraph_clone_node (e->callee, e->count, e->frequency, e->loop_nest,
                              update_original);
       cgraph_redirect_edge_callee (e, n);
     }
@@ -478,44 +479,75 @@ cgraph_maybe_hot_edge_p (struct cgraph_edge *edge)
    smallest badness are inlined first.  After each inlining is performed
    the costs of all caller edges of nodes affected are recomputed so the
    metrics may accurately depend on values such as number of inlinable callers
-   of the function or function body size.
-
-   With profiling we use number of executions of each edge to drive the cost.
-   We also should distinguish hot and cold calls where the cold calls are
-   inlined into only when code size is overall improved.
-   */
+   of the function or function body size.  */
 
 static int
 cgraph_edge_badness (struct cgraph_edge *edge)
 {
+  int badness;
+  int growth =
+    cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+
+  growth -= edge->caller->global.insns;
+
+  /* Always prefer inlining saving code size.  */
+  if (growth <= 0)
+    badness = INT_MIN - growth;
+
+  /* When profiling is available, base priorities -(#calls / growth).
+     So we optimize for overall number of "executed" inlined calls.  */
   if (max_count)
+    badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;
+
+  /* When function local profile is available, base priorities on
+     growth / frequency, so we optimize for overall frequency of inlined
+     calls.  This is not too accurate since while the call might be frequent
+     within function, the function itself is infrequent.
+
+     Other objective to optimize for is number of different calls inlined.
+     We add the estimated growth after inlining all functions to biass the
+     priorities slightly in this direction (so fewer times called functions
+     of the same size gets priority).  */
+  else if (flag_guess_branch_prob)
     {
+      int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
       int growth =
         cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
       growth -= edge->caller->global.insns;
+      badness = growth * 256;
+
+      /* Decrease badness if call is nested.  */
+      /* Compress the range so we don't overflow.  */
+      if (div > 256)
+        div = 256 + ceil_log2 (div) - 8;
+      if (div < 1)
+        div = 1;
+      if (badness > 0)
+        badness /= div;
+      badness += cgraph_estimate_growth (edge->callee);
+    }
+  /* When function local profile is not available or it does not give
+     useful information (ie frequency is zero), base the cost on
+     loop nest and overall size growth, so we optimize for overall number
+     of functions fully inlined in program.  */
+  else
+    {
+      int nest = MIN (edge->loop_nest, 8);
+      badness = cgraph_estimate_growth (edge->callee) * 256;
 
-      /* Always prefer inlining saving code size.  */
-      if (growth <= 0)
-        return INT_MIN - growth;
-      return ((int)((double)edge->count * INT_MIN / max_count)) / growth;
+      /* Decrease badness if call is nested.  */
+      if (badness > 0)
+        badness >>= nest;
+      else
+        {
+          badness <<= nest;
+        }
     }
+  /* Make recursive inlining happen always after other inlining is done.  */
+  if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
+    return badness + 1;
   else
-    {
-      int nest = MIN (edge->loop_nest, 8);
-      int badness = cgraph_estimate_growth (edge->callee) * 256;
-
-      /* Decrease badness if call is nested.  */
-      if (badness > 0)
-        badness >>= nest;
-      else
-        badness <<= nest;
-
-      /* Make recursive inlining happen always after other inlining is done.  */
-      if (cgraph_recursive_inlining_p (edge->caller, edge->callee, NULL))
-        return badness + 1;
-      else
-        return badness;
-    }
+    return badness;
 }
 
 /* Recompute heap nodes for each of caller edge.  */
@@ -651,7 +683,7 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node)
              cgraph_node_name (node));
 
   /* We need original clone to copy around.  */
-  master_clone = cgraph_clone_node (node, node->count, 1, false);
+  master_clone = cgraph_clone_node (node, node->count, CGRAPH_FREQ_BASE, 1, false);
   master_clone->needed = true;
   for (e = master_clone->callees; e; e = e->next_callee)
     if (!e->inline_failed)
@@ -831,10 +863,11 @@ cgraph_decide_inlining_of_small_functions (void)
           fprintf (dump_file,
                    " to be inlined into %s\n"
                    " Estimated growth after inlined into all callees is %+i insns.\n"
-                   " Estimated badness is %i.\n",
+                   " Estimated badness is %i, frequency %.2f.\n",
                    cgraph_node_name (edge->caller),
                    cgraph_estimate_growth (edge->callee),
-                   cgraph_edge_badness (edge));
+                   cgraph_edge_badness (edge),
+                   edge->frequency / (double)CGRAPH_FREQ_BASE);
           if (edge->count)
             fprintf (dump_file," Called "HOST_WIDEST_INT_PRINT_DEC"x\n", edge->count);
         }
```
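A reading aid for the new dump line: edge->frequency is a fixed-point quantity where CGRAPH_FREQ_BASE means "executed once per invocation of the caller", so the %.2f value printed above is simply frequency / CGRAPH_FREQ_BASE. A tiny illustration, assuming a CGRAPH_FREQ_BASE of 1000 (the sample frequencies are made up):

```c
#include <stdio.h>

#define CGRAPH_FREQ_BASE 1000   /* assumed value; fixed-point 1.0 */

int
main (void)
{
  /* Hypothetical fixed-point frequencies a call edge might carry.  */
  int freqs[] = { 250, CGRAPH_FREQ_BASE, 3500 };
  int i;

  for (i = 0; i < 3; i++)
    /* Same conversion the new dump code performs.  */
    printf ("frequency %.2f\n", freqs[i] / (double)CGRAPH_FREQ_BASE);
  return 0;
}
```

Values above 1.00 indicate a call site expected to run several times per invocation of its caller (e.g. inside a loop), which is exactly what the new frequency-based badness divisor rewards.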