aboutsummaryrefslogtreecommitdiff
path: root/gcc/ipa-inline.c
diff options
context:
space:
mode:
authorJan Hubicka <jh@suse.cz>2009-05-15 15:22:49 +0200
committerJan Hubicka <hubicka@gcc.gnu.org>2009-05-15 13:22:49 +0000
commit1d405d7b41e3b5e2db036567b32689a785ab2d14 (patch)
tree0cc5ef2866479bb9b4496201735f8d4183c3beeb /gcc/ipa-inline.c
parent7ffa47ca1ecdd0dc05917f3ac1373e9224c6d1fe (diff)
downloadgcc-1d405d7b41e3b5e2db036567b32689a785ab2d14.zip
gcc-1d405d7b41e3b5e2db036567b32689a785ab2d14.tar.gz
gcc-1d405d7b41e3b5e2db036567b32689a785ab2d14.tar.bz2
Revert the following patch until testsuite fallout is fixed:
* cgraph.c (dump_cgraph_node): Dump size/time/benefit. * cgraph.h (struct inline_summary): New fields self_size, size_inlining_benefit, self_time and time_inlining_benefit. (struct cgraph_global_info): Replace insns by time and size fields. * ipa-cp (ipcp_cloning_candidate_p): Base estimate on size (ipcp_estimate_growth, ipcp_insert_stage): Likewise. (ipcp_update_callgraph): Do not touch function bodies. * ipa-inline.c: Include except.h MAX_TIME: New constant. (overall_insns): Remove (overall_size, max_benefit): New static variables. (cgraph_estimate_time_after_inlining): New function. (cgraph_estimate_size_after_inlining): Rewrite using benefits. (cgraph_clone_inlined_nodes): Update size. (cgraph_mark_inline_edge): Update size. (cgraph_estimate_growth): Use size info. (cgraph_check_inline_limits): Check size. (cgraph_default_inline_p): Likewise. (cgraph_edge_badness): Compute badness based on benefit and size cost. (cgraph_decide_recursive_inlining): Check size. (cgraph_decide_inlining_of_small_function): Update size; dump sizes and times. (cgraph_decide_inlining): Likewise. (cgraph_decide_inlining_incrementally): Likewise; honor PARAM_EARLY_INLINING_INSNS. (likely_eliminated_by_inlining_p): New predicate. (estimate_function_body_sizes): New function. (compute_inline_parameters): Use it. * except.c (must_not_throw_labels): New function. * except.h (must_not_throw_labels): Declare. * tree-inline.c (init_inline_once): Kill inlining_weights * tree-ssa-structalias.c: Avoid uninitialized warning. * params.def (PARAM_MAX_INLINE_INSNS_SINGLE): Reduce to 300. (PARAM_MAX_INLINE_INSNS_AUTO): Reduce to 60. (PARAM_INLINE_CALL_COST): Remove. (PARAM_EARLY_INLINING_INSNS): New. From-SVN: r147575
Diffstat (limited to 'gcc/ipa-inline.c')
-rw-r--r--gcc/ipa-inline.c375
1 files changed, 92 insertions, 283 deletions
diff --git a/gcc/ipa-inline.c b/gcc/ipa-inline.c
index 17e0501..99640bf 100644
--- a/gcc/ipa-inline.c
+++ b/gcc/ipa-inline.c
@@ -138,9 +138,6 @@ along with GCC; see the file COPYING3. If not see
#include "tree-flow.h"
#include "rtl.h"
#include "ipa-prop.h"
-#include "except.h"
-
-#define MAX_TIME 1000000000
/* Mode incremental inliner operate on:
@@ -166,8 +163,8 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *, enum inlining_mode,
/* Statistics we collect about inlining algorithm. */
static int ncalls_inlined;
static int nfunctions_inlined;
-static int overall_size;
-static gcov_type max_count, max_benefit;
+static int overall_insns;
+static gcov_type max_count;
/* Holders of ipa cgraph hooks: */
static struct cgraph_node_hook_list *function_insertion_hook_holder;
@@ -178,30 +175,19 @@ inline_summary (struct cgraph_node *node)
return &node->local.inline_summary;
}
-/* Estimate self time of the function after inlining WHAT into TO. */
-
-static int
-cgraph_estimate_time_after_inlining (int frequency, struct cgraph_node *to,
- struct cgraph_node *what)
-{
- gcov_type time = (((gcov_type)what->global.time - inline_summary
- (what)->time_inlining_benefit)
- * frequency + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE
- + to->global.time;
- if (time < 0)
- time = 0;
- if (time > MAX_TIME)
- time = MAX_TIME;
- return time;
-}
-
-/* Estimate self time of the function after inlining WHAT into TO. */
+/* Estimate size of the function after inlining WHAT into TO. */
static int
cgraph_estimate_size_after_inlining (int times, struct cgraph_node *to,
struct cgraph_node *what)
{
- int size = (what->global.size - inline_summary (what)->size_inlining_benefit) * times + to->global.size;
+ int size;
+ tree fndecl = what->decl, arg;
+ int call_insns = PARAM_VALUE (PARAM_INLINE_CALL_COST);
+
+ for (arg = DECL_ARGUMENTS (fndecl); arg; arg = TREE_CHAIN (arg))
+ call_insns += estimate_move_cost (TREE_TYPE (arg));
+ size = (what->global.insns - call_insns) * times + to->global.insns;
gcc_assert (size >= 0);
return size;
}
@@ -227,10 +213,7 @@ cgraph_clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
{
gcc_assert (!e->callee->global.inlined_to);
if (e->callee->analyzed)
- {
- overall_size -= e->callee->global.size;
- nfunctions_inlined++;
- }
+ overall_insns -= e->callee->global.insns, nfunctions_inlined++;
duplicate = false;
}
else
@@ -270,7 +253,7 @@ static bool
cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
VEC (cgraph_edge_p, heap) **new_edges)
{
- int old_size = 0, new_size = 0;
+ int old_insns = 0, new_insns = 0;
struct cgraph_node *to = NULL, *what;
struct cgraph_edge *curr = e;
@@ -291,15 +274,16 @@ cgraph_mark_inline_edge (struct cgraph_edge *e, bool update_original,
/* Now update size of caller and all functions caller is inlined into. */
for (;e && !e->inline_failed; e = e->caller->callers)
{
+ old_insns = e->caller->global.insns;
+ new_insns = cgraph_estimate_size_after_inlining (1, e->caller,
+ what);
+ gcc_assert (new_insns >= 0);
to = e->caller;
- old_size = e->caller->global.size;
- new_size = cgraph_estimate_size_after_inlining (1, to, what);
- to->global.size = new_size;
- to->global.time = cgraph_estimate_time_after_inlining (e->frequency, to, what);
+ to->global.insns = new_insns;
}
gcc_assert (what->global.inlined_to == to);
- if (new_size > old_size)
- overall_size += new_size - old_size;
+ if (new_insns > old_insns)
+ overall_insns += new_insns - old_insns;
ncalls_inlined++;
if (flag_indirect_inlining)
@@ -354,7 +338,7 @@ cgraph_estimate_growth (struct cgraph_node *node)
self_recursive = true;
if (e->inline_failed)
growth += (cgraph_estimate_size_after_inlining (1, e->caller, node)
- - e->caller->global.size);
+ - e->caller->global.insns);
}
/* ??? Wrong for non-trivially self recursive functions or cases where
@@ -362,7 +346,7 @@ cgraph_estimate_growth (struct cgraph_node *node)
as in that case we will keep the body around, but we will also avoid
some inlining. */
if (!node->needed && !DECL_EXTERNAL (node->decl) && !self_recursive)
- growth -= node->global.size;
+ growth -= node->global.insns;
node->global.estimated_growth = growth;
return growth;
@@ -397,17 +381,17 @@ cgraph_check_inline_limits (struct cgraph_node *to, struct cgraph_node *what,
/* When inlining large function body called once into small function,
take the inlined function as base for limiting the growth. */
- if (inline_summary (to)->self_size > inline_summary(what)->self_size)
- limit = inline_summary (to)->self_size;
+ if (inline_summary (to)->self_insns > inline_summary(what)->self_insns)
+ limit = inline_summary (to)->self_insns;
else
- limit = inline_summary (what)->self_size;
+ limit = inline_summary (what)->self_insns;
limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
/* Check the size after inlining against the function limits. But allow
the function to shrink if it went over the limits by forced inlining. */
newsize = cgraph_estimate_size_after_inlining (times, to, what);
- if (newsize >= to->global.size
+ if (newsize >= to->global.insns
&& newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
&& newsize > limit)
{
@@ -458,7 +442,7 @@ cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
if (DECL_DECLARED_INLINE_P (decl))
{
- if (n->global.size >= MAX_INLINE_INSNS_SINGLE)
+ if (n->global.insns >= MAX_INLINE_INSNS_SINGLE)
{
if (reason)
*reason = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
@@ -467,7 +451,7 @@ cgraph_default_inline_p (struct cgraph_node *n, cgraph_inline_failed_t *reason)
}
else
{
- if (n->global.size >= MAX_INLINE_INSNS_AUTO)
+ if (n->global.insns >= MAX_INLINE_INSNS_AUTO)
{
if (reason)
*reason = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
@@ -513,7 +497,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
int growth =
cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
- growth -= edge->caller->global.size;
+ growth -= edge->caller->global.insns;
/* Always prefer inlining saving code size. */
if (growth <= 0)
@@ -522,8 +506,7 @@ cgraph_edge_badness (struct cgraph_edge *edge)
/* When profiling is available, base priorities -(#calls / growth).
So we optimize for overall number of "executed" inlined calls. */
else if (max_count)
- badness = ((int)((double)edge->count * INT_MIN / max_count / (max_benefit + 1))
- * (inline_summary (edge->callee)->time_inlining_benefit + 1)) / growth;
+ badness = ((int)((double)edge->count * INT_MIN / max_count)) / growth;
/* When function local profile is available, base priorities on
growth / frequency, so we optimize for overall frequency of inlined
@@ -536,11 +519,11 @@ cgraph_edge_badness (struct cgraph_edge *edge)
of the same size gets priority). */
else if (flag_guess_branch_prob)
{
- int div = edge->frequency * 100 / CGRAPH_FREQ_BASE + 1;
+ int div = edge->frequency * 100 / CGRAPH_FREQ_BASE;
+ int growth =
+ cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
+ growth -= edge->caller->global.insns;
badness = growth * 256;
- div *= MIN (100 * inline_summary (edge->callee)->time_inlining_benefit
- / (edge->callee->global.time + 1) + 1, 100);
-
/* Decrease badness if call is nested. */
/* Compress the range so we don't overflow. */
@@ -783,9 +766,8 @@ cgraph_decide_recursive_inlining (struct cgraph_node *node,
fibheap_delete (heap);
if (dump_file)
fprintf (dump_file,
- "\n Inlined %i times, body grown from size %i to %i, time %i to %i\n", n,
- master_clone->global.size, node->global.size,
- master_clone->global.time, node->global.time);
+ "\n Inlined %i times, body grown from %i to %i insns\n", n,
+ master_clone->global.insns, node->global.insns);
/* Remove master clone we used for inlining. We rely that clones inlined
into master clone gets queued just before master clone so we don't
@@ -863,7 +845,7 @@ cgraph_decide_inlining_of_small_functions (void)
cgraph_inline_failed_t failed_reason;
fibheap_t heap = fibheap_new ();
bitmap updated_nodes = BITMAP_ALLOC (NULL);
- int min_size, max_size;
+ int min_insns, max_insns;
VEC (cgraph_edge_p, heap) *new_indirect_edges = NULL;
if (flag_indirect_inlining)
@@ -897,26 +879,26 @@ cgraph_decide_inlining_of_small_functions (void)
}
}
- max_size = compute_max_insns (overall_size);
- min_size = overall_size;
+ max_insns = compute_max_insns (overall_insns);
+ min_insns = overall_insns;
- while (overall_size <= max_size
+ while (overall_insns <= max_insns
&& (edge = (struct cgraph_edge *) fibheap_extract_min (heap)))
{
- int old_size = overall_size;
+ int old_insns = overall_insns;
struct cgraph_node *where;
int growth =
cgraph_estimate_size_after_inlining (1, edge->caller, edge->callee);
cgraph_inline_failed_t not_good = CIF_OK;
- growth -= edge->caller->global.size;
+ growth -= edge->caller->global.insns;
if (dump_file)
{
fprintf (dump_file,
- "\nConsidering %s with %i size\n",
+ "\nConsidering %s with %i insns\n",
cgraph_node_name (edge->callee),
- edge->callee->global.size);
+ edge->callee->global.insns);
fprintf (dump_file,
" to be inlined into %s in %s:%i\n"
" Estimated growth after inlined into all callees is %+i insns.\n"
@@ -1058,20 +1040,19 @@ cgraph_decide_inlining_of_small_functions (void)
if (dump_file)
{
fprintf (dump_file,
- " Inlined into %s which now has sie %i and self time %i,"
- "net change of %+i.\n",
+ " Inlined into %s which now has %i insns,"
+ "net change of %+i insns.\n",
cgraph_node_name (edge->caller),
- edge->caller->global.time,
- edge->caller->global.size,
- overall_size - old_size);
+ edge->caller->global.insns,
+ overall_insns - old_insns);
}
- if (min_size > overall_size)
+ if (min_insns > overall_insns)
{
- min_size = overall_size;
- max_size = compute_max_insns (min_size);
+ min_insns = overall_insns;
+ max_insns = compute_max_insns (min_insns);
if (dump_file)
- fprintf (dump_file, "New minimal size reached: %i\n", min_size);
+ fprintf (dump_file, "New minimal insns reached: %i\n", min_insns);
}
}
while ((edge = (struct cgraph_edge *) fibheap_extract_min (heap)) != NULL)
@@ -1100,38 +1081,34 @@ cgraph_decide_inlining (void)
int nnodes;
struct cgraph_node **order =
XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
- int old_size = 0;
+ int old_insns = 0;
int i;
+ int initial_insns = 0;
bool redo_always_inline = true;
- int initial_size = 0;
cgraph_remove_function_insertion_hook (function_insertion_hook_holder);
max_count = 0;
- max_benefit = 0;
for (node = cgraph_nodes; node; node = node->next)
- if (node->analyzed)
+ if (node->analyzed && (node->needed || node->reachable))
{
struct cgraph_edge *e;
- gcc_assert (inline_summary (node)->self_size == node->global.size);
- gcc_assert (node->needed || node->reachable);
- initial_size += node->global.size;
+ initial_insns += inline_summary (node)->self_insns;
+ gcc_assert (inline_summary (node)->self_insns == node->global.insns);
for (e = node->callees; e; e = e->next_callee)
if (max_count < e->count)
max_count = e->count;
- if (max_benefit < inline_summary (node)->time_inlining_benefit)
- max_benefit = inline_summary (node)->time_inlining_benefit;
}
+ overall_insns = initial_insns;
gcc_assert (!max_count || (profile_info && flag_branch_probabilities));
- overall_size = initial_size;
nnodes = cgraph_postorder (order);
if (dump_file)
fprintf (dump_file,
- "\nDeciding on inlining. Starting with size %i.\n",
- initial_size);
+ "\nDeciding on inlining. Starting with %i insns.\n",
+ initial_insns);
for (node = cgraph_nodes; node; node = node->next)
node->aux = 0;
@@ -1165,9 +1142,9 @@ cgraph_decide_inlining (void)
continue;
if (dump_file)
fprintf (dump_file,
- "\nConsidering %s size:%i (always inline)\n",
- cgraph_node_name (node), node->global.size);
- old_size = overall_size;
+ "\nConsidering %s %i insns (always inline)\n",
+ cgraph_node_name (node), node->global.insns);
+ old_insns = overall_insns;
for (e = node->callers; e; e = next)
{
next = e->next_caller;
@@ -1186,9 +1163,9 @@ cgraph_decide_inlining (void)
redo_always_inline = true;
if (dump_file)
fprintf (dump_file,
- " Inlined into %s which now has size %i.\n",
+ " Inlined into %s which now has %i insns.\n",
cgraph_node_name (e->caller),
- e->caller->global.size);
+ e->caller->global.insns);
}
/* Inlining self recursive function might introduce new calls to
themselves we didn't see in the loop above. Fill in the proper
@@ -1198,8 +1175,8 @@ cgraph_decide_inlining (void)
e->inline_failed = CIF_RECURSIVE_INLINING;
if (dump_file)
fprintf (dump_file,
- " Inlined for a net change of %+i size.\n",
- overall_size - old_size);
+ " Inlined for a net change of %+i insns.\n",
+ overall_insns - old_insns);
}
}
@@ -1227,25 +1204,27 @@ cgraph_decide_inlining (void)
if (dump_file)
{
fprintf (dump_file,
- "\nConsidering %s size %i.\n",
- cgraph_node_name (node), node->global.size);
+ "\nConsidering %s %i insns.\n",
+ cgraph_node_name (node), node->global.insns);
fprintf (dump_file,
" Called once from %s %i insns.\n",
cgraph_node_name (node->callers->caller),
- node->callers->caller->global.size);
+ node->callers->caller->global.insns);
}
+ old_insns = overall_insns;
+
if (cgraph_check_inline_limits (node->callers->caller, node,
NULL, false))
{
cgraph_mark_inline (node->callers);
if (dump_file)
fprintf (dump_file,
- " Inlined into %s which now has %i size"
- " for a net change of %+i size.\n",
+ " Inlined into %s which now has %i insns"
+ " for a net change of %+i insns.\n",
cgraph_node_name (node->callers->caller),
- node->callers->caller->global.size,
- overall_size - old_size);
+ node->callers->caller->global.insns,
+ overall_insns - old_insns);
}
else
{
@@ -1264,9 +1243,9 @@ cgraph_decide_inlining (void)
if (dump_file)
fprintf (dump_file,
"\nInlined %i calls, eliminated %i functions, "
- "size %i turned to %i size.\n\n",
- ncalls_inlined, nfunctions_inlined, initial_size,
- overall_size);
+ "%i insns turned to %i insns.\n\n",
+ ncalls_inlined, nfunctions_inlined, initial_insns,
+ overall_insns);
free (order);
return 0;
}
@@ -1451,7 +1430,6 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
if (mode != INLINE_ALL && mode != INLINE_ALWAYS_INLINE)
for (e = node->callees; e; e = e->next_callee)
{
- int allowed_growth = 0;
if (!e->callee->local.inlinable
|| !e->inline_failed
|| e->callee->local.disregard_inline_limits)
@@ -1478,10 +1456,6 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
}
continue;
}
-
- if (cgraph_maybe_hot_edge_p (e))
- allowed_growth = PARAM_VALUE (PARAM_EARLY_INLINING_INSNS);
-
/* When the function body would grow and inlining the function won't
eliminate the need for offline copy of the function, don't inline.
*/
@@ -1489,17 +1463,17 @@ cgraph_decide_inlining_incrementally (struct cgraph_node *node,
|| (!flag_inline_functions
&& !DECL_DECLARED_INLINE_P (e->callee->decl)))
&& (cgraph_estimate_size_after_inlining (1, e->caller, e->callee)
- >= e->caller->global.size + allowed_growth)
- && cgraph_estimate_growth (e->callee) >= allowed_growth)
+ > e->caller->global.insns)
+ && cgraph_estimate_growth (e->callee) > 0)
{
if (dump_file)
{
indent_to (dump_file, depth);
fprintf (dump_file,
- "Not inlining: code size would grow by %i.\n",
+ "Not inlining: code size would grow by %i insns.\n",
cgraph_estimate_size_after_inlining (1, e->caller,
e->callee)
- - e->caller->global.size);
+ - e->caller->global.insns);
}
continue;
}
@@ -1626,177 +1600,6 @@ struct simple_ipa_opt_pass pass_ipa_early_inline =
}
};
-/* See if statement might disappear after inlining. We are not terribly
- sophisficated, basically looking for simple abstraction penalty wrappers. */
-
-static bool
-likely_eliminated_by_inlining_p (gimple stmt)
-{
- enum gimple_code code = gimple_code (stmt);
- switch (code)
- {
- case GIMPLE_RETURN:
- return true;
- case GIMPLE_ASSIGN:
- if (gimple_num_ops (stmt) != 2)
- return false;
-
- /* Casts of parameters, loads from parameters passed by reference
- and stores to return value or parameters are probably free after
- inlining. */
- if (gimple_assign_rhs_code (stmt) == CONVERT_EXPR
- || gimple_assign_rhs_code (stmt) == NOP_EXPR
- || gimple_assign_rhs_code (stmt) == VIEW_CONVERT_EXPR
- || gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS)
- {
- tree rhs = gimple_assign_rhs1 (stmt);
- tree lhs = gimple_assign_lhs (stmt);
- tree inner_rhs = rhs;
- tree inner_lhs = lhs;
- bool rhs_free = false;
- bool lhs_free = false;
-
- while (handled_component_p (inner_lhs) || TREE_CODE (inner_lhs) == INDIRECT_REF)
- inner_lhs = TREE_OPERAND (inner_lhs, 0);
- while (handled_component_p (inner_rhs)
- || TREE_CODE (inner_rhs) == ADDR_EXPR || TREE_CODE (inner_rhs) == INDIRECT_REF)
- inner_rhs = TREE_OPERAND (inner_rhs, 0);
-
-
- if (TREE_CODE (inner_rhs) == PARM_DECL
- || (TREE_CODE (inner_rhs) == SSA_NAME
- && SSA_NAME_IS_DEFAULT_DEF (inner_rhs)
- && TREE_CODE (SSA_NAME_VAR (inner_rhs)) == PARM_DECL))
- rhs_free = true;
- if (rhs_free && is_gimple_reg (lhs))
- lhs_free = true;
- if (((TREE_CODE (inner_lhs) == PARM_DECL
- || (TREE_CODE (inner_lhs) == SSA_NAME
- && SSA_NAME_IS_DEFAULT_DEF (inner_lhs)
- && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == PARM_DECL))
- && inner_lhs != lhs)
- || TREE_CODE (inner_lhs) == RESULT_DECL
- || (TREE_CODE (inner_lhs) == SSA_NAME
- && TREE_CODE (SSA_NAME_VAR (inner_lhs)) == RESULT_DECL))
- lhs_free = true;
- if (lhs_free && (is_gimple_reg (rhs) || is_gimple_min_invariant (rhs)))
- rhs_free = true;
- if (lhs_free && rhs_free)
- return true;
- }
- return false;
- default:
- return false;
- }
-}
-
-/* Compute function body size parameters for NODE. */
-
-static void
-estimate_function_body_sizes (struct cgraph_node *node)
-{
- gcov_type time = 0;
- gcov_type time_inlining_benefit = 0;
- int size = 0;
- int size_inlining_benefit = 0;
- basic_block bb;
- gimple_stmt_iterator bsi;
- struct function *my_function = DECL_STRUCT_FUNCTION (node->decl);
- tree arg;
- int freq;
- tree funtype = TREE_TYPE (node->decl);
- bitmap must_not_throw = must_not_throw_labels ();
-
- if (dump_file)
- {
- fprintf (dump_file, "Analyzing function body size: %s\n", cgraph_node_name (node));
- }
-
- gcc_assert (my_function && my_function->cfg);
- FOR_EACH_BB_FN (bb, my_function)
- {
- freq = compute_call_stmt_bb_frequency (node->decl, bb);
- for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
- {
- int this_size = estimate_num_insns (gsi_stmt (bsi), &eni_size_weights);
- int this_time = estimate_num_insns (gsi_stmt (bsi), &eni_time_weights);
-
- /* MUST_NOT_THROW is usually handled by runtime calling terminate and stopping
- stacking unwinding. However when there is local cleanup that can resume
- to MUST_NOT_THROW then we generate explicit handler containing
- std::terminate () call.
-
- Because inlining of function can introduce new cleanup region, prior
- inlining we keep std::terinate () calls for every MUST_NOT_THROW containing
- function call. Wast majority of these will be eliminated after inlining
- and crossjumping will inify possible duplicated calls. So ignore
- the handlers for function body estimates. */
- if (gimple_code (gsi_stmt (bsi)) == GIMPLE_LABEL
- && bitmap_bit_p (must_not_throw,
- LABEL_DECL_UID (gimple_label_label (gsi_stmt (bsi)))))
- {
- if (dump_file)
- fprintf (dump_file, " MUST_NOT_THROW landing pad. Ignoring whole BB.\n");
- }
- if (dump_file)
- {
- fprintf (dump_file, " freq:%6i size:%3i time:%3i ", freq, this_size, this_time);
- print_gimple_stmt (dump_file, gsi_stmt (bsi), 0, 0);
- }
- this_time *= freq;
- time += this_time;
- size += this_size;
- if (likely_eliminated_by_inlining_p (gsi_stmt (bsi)))
- {
- size_inlining_benefit += this_size;
- time_inlining_benefit += this_time;
- if (dump_file)
- fprintf (dump_file, " Likely eliminated\n");
- }
- gcc_assert (time >= 0);
- gcc_assert (size >= 0);
- }
- }
- time = (time + CGRAPH_FREQ_BASE / 2) / CGRAPH_FREQ_BASE;
- time_inlining_benefit = ((time_inlining_benefit + CGRAPH_FREQ_BASE / 2)
- / CGRAPH_FREQ_BASE);
- if (dump_file)
- {
- fprintf (dump_file, "Overall function body time: %i-%i size: %i-%i\n",
- (int)time, (int)time_inlining_benefit,
- size, size_inlining_benefit);
- }
- time_inlining_benefit += eni_time_weights.call_cost;
- size_inlining_benefit += eni_size_weights.call_cost;
- if (!VOID_TYPE_P (TREE_TYPE (funtype)))
- {
- int cost = estimate_move_cost (TREE_TYPE (funtype));
- time_inlining_benefit += cost;
- size_inlining_benefit += cost;
- }
- for (arg = DECL_ARGUMENTS (node->decl); arg; arg = TREE_CHAIN (arg))
- {
- int cost = estimate_move_cost (TREE_TYPE (arg));
- time_inlining_benefit += cost;
- size_inlining_benefit += cost;
- }
- if (time_inlining_benefit > MAX_TIME)
- time_inlining_benefit = MAX_TIME;
- if (time > MAX_TIME)
- time = MAX_TIME;
- inline_summary (node)->self_time = time;
- inline_summary (node)->self_size = size;
- if (dump_file)
- {
- fprintf (dump_file, "With function call overhead time: %i-%i size: %i-%i\n",
- (int)time, (int)time_inlining_benefit,
- size, size_inlining_benefit);
- }
- inline_summary (node)->time_inlining_benefit = time_inlining_benefit;
- inline_summary (node)->size_inlining_benefit = size_inlining_benefit;
- BITMAP_FREE (must_not_throw);
-}
-
/* Compute parameters of functions used by inliner. */
unsigned int
compute_inline_parameters (struct cgraph_node *node)
@@ -1814,13 +1617,19 @@ compute_inline_parameters (struct cgraph_node *node)
/* Can this function be inlined at all? */
node->local.inlinable = tree_inlinable_function_p (current_function_decl);
+
+ /* Estimate the number of instructions for this function.
+ ??? At -O0 we don't use this information except for the dumps, and
+ even then only for always_inline functions. But disabling this
+ causes ICEs in the inline heuristics... */
+ inline_summary (node)->self_insns
+ = estimate_num_insns_fn (current_function_decl, &eni_inlining_weights);
if (node->local.inlinable && !node->local.disregard_inline_limits)
node->local.disregard_inline_limits
= DECL_DISREGARD_INLINE_LIMITS (current_function_decl);
- estimate_function_body_sizes (node);
+
/* Inlining characteristics are maintained by the cgraph_mark_inline. */
- node->global.time = inline_summary (node)->self_time;
- node->global.size = inline_summary (node)->self_size;
+ node->global.insns = inline_summary (node)->self_insns;
return 0;
}
@@ -1838,7 +1647,7 @@ struct gimple_opt_pass pass_inline_parameters =
{
{
GIMPLE_PASS,
- "inline_param", /* name */
+ NULL, /* name */
NULL, /* gate */
compute_inline_parameters_for_current,/* execute */
NULL, /* sub */