path: root/gcc/passes.c
Diffstat (limited to 'gcc/passes.c')
-rw-r--r--  gcc/passes.c  846
1 file changed, 465 insertions, 381 deletions
diff --git a/gcc/passes.c b/gcc/passes.c
index 822e4db..0e85c1c 100644
--- a/gcc/passes.c
+++ b/gcc/passes.c
@@ -86,7 +86,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
#include "dwarf2out.h"
#endif
-#if defined(DBX_DEBUGGING_INFO) || defined(XCOFF_DEBUGGING_INFO)
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
#include "dbxout.h"
#endif
@@ -472,9 +472,24 @@ rest_of_handle_final (void)
/* Release all memory held by regsets now. */
regset_release_memory ();
}
- timevar_pop (TV_FINAL);
+
+ /* Write DBX symbols if requested. */
+
+ /* Note that for those inline functions where we don't initially
+ know for certain that we will be generating an out-of-line copy,
+ the first invocation of this routine (rest_of_compilation) will
+ skip over this code by doing a `goto exit_rest_of_compilation;'.
+ Later on, wrapup_global_declarations will (indirectly) call
+ rest_of_compilation again for those inline functions that need
+ to have out-of-line copies generated. During that call, we
+ *will* be routed past here. */
+
+ timevar_push (TV_SYMOUT);
+ (*debug_hooks->function_decl) (current_function_decl);
+ timevar_pop (TV_SYMOUT);
ggc_collect ();
+ timevar_pop (TV_FINAL);
}
#ifdef DELAY_SLOTS
@@ -488,9 +503,10 @@ rest_of_handle_delay_slots (void)
dbr_schedule (get_insns (), dump_file);
close_dump_file (DFI_dbr, print_rtl, get_insns ());
- timevar_pop (TV_DBR_SCHED);
ggc_collect ();
+
+ timevar_pop (TV_DBR_SCHED);
}
#endif
@@ -532,9 +548,9 @@ rest_of_handle_stack_regs (void)
}
close_dump_file (DFI_stack, print_rtl_with_bb, get_insns ());
- timevar_pop (TV_REG_STACK);
ggc_collect ();
+ timevar_pop (TV_REG_STACK);
}
#endif
@@ -551,7 +567,7 @@ rest_of_handle_variable_tracking (void)
timevar_pop (TV_VAR_TRACKING);
}
-/* Machine independent reorg pass. */
+/* Machine dependent reorg pass. */
static void
rest_of_handle_machine_reorg (void)
{
@@ -561,9 +577,9 @@ rest_of_handle_machine_reorg (void)
targetm.machine_dependent_reorg ();
close_dump_file (DFI_mach, print_rtl, get_insns ());
- timevar_pop (TV_MACH_DEP);
ggc_collect ();
+ timevar_pop (TV_MACH_DEP);
}
@@ -574,17 +590,14 @@ rest_of_handle_new_regalloc (void)
{
int failure;
+ timevar_push (TV_LOCAL_ALLOC);
+ open_dump_file (DFI_lreg, current_function_decl);
+
delete_trivially_dead_insns (get_insns (), max_reg_num ());
reg_alloc ();
timevar_pop (TV_LOCAL_ALLOC);
- if (dump_file_tbl[DFI_lreg].enabled)
- {
- timevar_push (TV_DUMP);
-
- close_dump_file (DFI_lreg, NULL, NULL);
- timevar_pop (TV_DUMP);
- }
+ close_dump_file (DFI_lreg, NULL, NULL);
/* XXX clean up the whole mess to bring live info in shape again. */
timevar_push (TV_GLOBAL_ALLOC);
@@ -595,14 +608,14 @@ rest_of_handle_new_regalloc (void)
timevar_pop (TV_GLOBAL_ALLOC);
+ ggc_collect ();
+
if (dump_file_tbl[DFI_greg].enabled)
{
timevar_push (TV_DUMP);
-
dump_global_regs (dump_file);
-
- close_dump_file (DFI_greg, print_rtl_with_bb, get_insns ());
timevar_pop (TV_DUMP);
+ close_dump_file (DFI_greg, print_rtl_with_bb, get_insns ());
}
if (failure)
@@ -621,6 +634,9 @@ rest_of_handle_old_regalloc (void)
int failure;
int rebuild_notes;
+ timevar_push (TV_LOCAL_ALLOC);
+ open_dump_file (DFI_lreg, current_function_decl);
+
/* Allocate the reg_renumber array. */
allocate_reg_info (max_regno, FALSE, TRUE);
@@ -651,14 +667,13 @@ rest_of_handle_old_regalloc (void)
if (dump_file_tbl[DFI_lreg].enabled)
{
timevar_push (TV_DUMP);
-
dump_flow_info (dump_file);
dump_local_alloc (dump_file);
-
- close_dump_file (DFI_lreg, print_rtl_with_bb, get_insns ());
timevar_pop (TV_DUMP);
}
+ close_dump_file (DFI_lreg, print_rtl_with_bb, get_insns ());
+
ggc_collect ();
timevar_push (TV_GLOBAL_ALLOC);
@@ -675,18 +690,19 @@ rest_of_handle_old_regalloc (void)
failure = reload (get_insns (), 0);
}
- timevar_pop (TV_GLOBAL_ALLOC);
-
if (dump_file_tbl[DFI_greg].enabled)
{
timevar_push (TV_DUMP);
-
dump_global_regs (dump_file);
+ timevar_pop (TV_DUMP);
close_dump_file (DFI_greg, print_rtl_with_bb, get_insns ());
- timevar_pop (TV_DUMP);
}
+ ggc_collect ();
+
+ timevar_pop (TV_GLOBAL_ALLOC);
+
return failure;
}
@@ -738,52 +754,65 @@ rest_of_handle_reorder_blocks (void)
close_dump_file (DFI_bbro, print_rtl_with_bb, get_insns ());
}
+/* Partition hot and cold basic blocks. */
+static void
+rest_of_handle_partition_blocks (void)
+{
+ no_new_pseudos = 0;
+ partition_hot_cold_basic_blocks ();
+ allocate_reg_life_data ();
+ update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
+ PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES);
+ no_new_pseudos = 1;
+}
+
#ifdef INSN_SCHEDULING
/* Run instruction scheduler. */
+/* Perform SMS module scheduling. */
static void
-rest_of_handle_sched (void)
+rest_of_handle_sms (void)
{
timevar_push (TV_SMS);
- if (optimize > 0 && flag_modulo_sched)
- {
+ open_dump_file (DFI_sms, current_function_decl);
- /* Perform SMS module scheduling. */
- open_dump_file (DFI_sms, current_function_decl);
+ /* We want to be able to create new pseudos. */
+ no_new_pseudos = 0;
+ sms_schedule (dump_file);
+ close_dump_file (DFI_sms, print_rtl, get_insns ());
- /* We want to be able to create new pseudos. */
- no_new_pseudos = 0;
- sms_schedule (dump_file);
- close_dump_file (DFI_sms, print_rtl, get_insns ());
+ /* Update the life information, because we add pseudos. */
+ max_regno = max_reg_num ();
+ allocate_reg_info (max_regno, FALSE, FALSE);
+ update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
+ (PROP_DEATH_NOTES
+ | PROP_KILL_DEAD_CODE
+ | PROP_SCAN_DEAD_CODE));
+ no_new_pseudos = 1;
- /* Update the life information, because we add pseudos. */
- max_regno = max_reg_num ();
- allocate_reg_info (max_regno, FALSE, FALSE);
- update_life_info_in_dirty_blocks (UPDATE_LIFE_GLOBAL_RM_NOTES,
- (PROP_DEATH_NOTES
- | PROP_KILL_DEAD_CODE
- | PROP_SCAN_DEAD_CODE));
- no_new_pseudos = 1;
- }
+ ggc_collect ();
timevar_pop (TV_SMS);
+}
+
+/* Run instruction scheduler. */
+static void
+rest_of_handle_sched (void)
+{
timevar_push (TV_SCHED);
/* Print function header into sched dump now
because doing the sched analysis makes some of the dump. */
- if (optimize > 0 && flag_schedule_insns)
- {
- open_dump_file (DFI_sched, current_function_decl);
+ open_dump_file (DFI_sched, current_function_decl);
- /* Do control and data sched analysis,
- and write some of the results to dump file. */
+ /* Do control and data sched analysis,
+ and write some of the results to dump file. */
- schedule_insns (dump_file);
+ schedule_insns (dump_file);
- close_dump_file (DFI_sched, print_rtl_with_bb, get_insns ());
- }
- timevar_pop (TV_SCHED);
+ close_dump_file (DFI_sched, print_rtl_with_bb, get_insns ());
ggc_collect ();
+ timevar_pop (TV_SCHED);
}
/* Run second scheduling pass after reload. */
@@ -810,15 +839,17 @@ rest_of_handle_sched2 (void)
schedule_insns (dump_file);
close_dump_file (DFI_sched2, print_rtl_with_bb, get_insns ());
- timevar_pop (TV_SCHED2);
ggc_collect ();
+
+ timevar_pop (TV_SCHED2);
}
#endif
static void
rest_of_handle_gcse2 (void)
{
+ timevar_push (TV_RELOAD_CSE_REGS);
open_dump_file (DFI_gcse2, current_function_decl);
gcse_after_reload_main (get_insns (), dump_file);
@@ -831,6 +862,8 @@ rest_of_handle_gcse2 (void)
#ifdef ENABLE_CHECKING
verify_flow_info ();
#endif
+
+ timevar_pop (TV_RELOAD_CSE_REGS);
}
/* Register allocation pre-pass, to reduce number of moves necessary
@@ -845,9 +878,9 @@ rest_of_handle_regmove (void)
cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE);
close_dump_file (DFI_regmove, print_rtl_with_bb, get_insns ());
- timevar_pop (TV_REGMOVE);
ggc_collect ();
+ timevar_pop (TV_REGMOVE);
}
/* Run tracer. */
@@ -867,22 +900,25 @@ rest_of_handle_tracer (void)
static void
rest_of_handle_if_conversion (void)
{
+ timevar_push (TV_IFCVT);
open_dump_file (DFI_ce1, current_function_decl);
+
if (flag_if_conversion)
{
- timevar_push (TV_IFCVT);
if (dump_file)
dump_flow_info (dump_file);
cleanup_cfg (CLEANUP_EXPENSIVE);
reg_scan (get_insns (), max_reg_num (), 0);
if_convert (0);
- timevar_pop (TV_IFCVT);
}
+
timevar_push (TV_JUMP);
cleanup_cfg (CLEANUP_EXPENSIVE);
reg_scan (get_insns (), max_reg_num (), 0);
timevar_pop (TV_JUMP);
+
close_dump_file (DFI_ce1, print_rtl_with_bb, get_insns ());
+ timevar_pop (TV_IFCVT);
}
/* Rerun if-conversion, as combine may have simplified things enough
@@ -902,6 +938,23 @@ rest_of_handle_if_after_combine (void)
}
static void
+rest_of_handle_if_after_reload (void)
+{
+ timevar_push (TV_IFCVT2);
+ open_dump_file (DFI_ce3, current_function_decl);
+
+ /* Last attempt to optimize CFG, as scheduling, peepholing and insn
+ splitting possibly introduced more crossjumping opportunities. */
+ cleanup_cfg (CLEANUP_EXPENSIVE
+ | CLEANUP_UPDATE_LIFE
+ | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0));
+ if (flag_if_conversion2)
+ if_convert (1);
+ close_dump_file (DFI_ce3, print_rtl_with_bb, get_insns ());
+ timevar_pop (TV_IFCVT2);
+}
+
+static void
rest_of_handle_web (void)
{
open_dump_file (DFI_web, current_function_decl);
@@ -920,6 +973,7 @@ static void
rest_of_handle_branch_prob (void)
{
struct loops loops;
+
timevar_push (TV_BRANCH_PROB);
open_dump_file (DFI_bp, current_function_decl);
@@ -1026,7 +1080,7 @@ rest_of_handle_combine (void)
rebuild_jump_labels_after_combine
= combine_instructions (get_insns (), max_reg_num ());
- /* Combining get_insns () may have turned an indirect jump into a
+ /* Combining insns may have turned an indirect jump into a
direct jump. Rebuild the JUMP_LABEL fields of jumping
instructions. */
if (rebuild_jump_labels_after_combine)
@@ -1059,7 +1113,6 @@ rest_of_handle_life (void)
cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_UPDATE_LIFE
| CLEANUP_LOG_LINKS
| (flag_thread_jumps ? CLEANUP_THREADING : 0));
- timevar_pop (TV_FLOW);
if (extra_warnings)
{
@@ -1093,6 +1146,7 @@ static void
rest_of_handle_cse (void)
{
int tem;
+
open_dump_file (DFI_cse, current_function_decl);
if (dump_file)
dump_flow_info (dump_file);
@@ -1117,6 +1171,8 @@ rest_of_handle_cse (void)
timevar_pop (TV_CSE);
close_dump_file (DFI_cse, print_rtl_with_bb, get_insns ());
+
+ ggc_collect ();
}
/* Run second CSE pass after loop optimizations. */
@@ -1124,6 +1180,7 @@ static void
rest_of_handle_cse2 (void)
{
int tem;
+
timevar_push (TV_CSE2);
open_dump_file (DFI_cse2, current_function_decl);
if (dump_file)
@@ -1149,8 +1206,9 @@ rest_of_handle_cse2 (void)
}
reg_scan (get_insns (), max_reg_num (), 0);
close_dump_file (DFI_cse2, print_rtl_with_bb, get_insns ());
- ggc_collect ();
timevar_pop (TV_CSE2);
+
+ ggc_collect ();
}
/* Perform global cse. */
@@ -1159,6 +1217,7 @@ rest_of_handle_gcse (void)
{
int save_csb, save_cfj;
int tem2 = 0, tem;
+
timevar_push (TV_GCSE);
open_dump_file (DFI_gcse, current_function_decl);
@@ -1258,9 +1317,9 @@ rest_of_handle_loop_optimize (void)
/* Loop can create trivially dead instructions. */
delete_trivially_dead_insns (get_insns (), max_reg_num ());
+ find_basic_blocks (get_insns (), max_reg_num (), dump_file);
close_dump_file (DFI_loop, print_rtl, get_insns ());
timevar_pop (TV_LOOP);
- find_basic_blocks (get_insns (), max_reg_num (), dump_file);
ggc_collect ();
}
@@ -1330,6 +1389,319 @@ rest_of_handle_loop2 (void)
ggc_collect ();
}
+static void
+rest_of_handle_branch_target_load_optimize (void)
+{
+ static int warned = 0;
+
+ /* Leave this a warning for now so that it is possible to experiment
+ with running this pass twice. In 3.6, we should either make this
+ an error, or use separate dump files. */
+ if (flag_branch_target_load_optimize
+ && flag_branch_target_load_optimize2
+ && !warned)
+ {
+ warning ("branch target register load optimization is not intended "
+ "to be run twice");
+
+ warned = 1;
+ }
+
+ open_dump_file (DFI_branch_target_load, current_function_decl);
+ branch_target_load_optimize (epilogue_completed);
+ close_dump_file (DFI_branch_target_load, print_rtl_with_bb, get_insns ());
+ ggc_collect ();
+}
+
+#ifdef OPTIMIZE_MODE_SWITCHING
+static void
+rest_of_handle_mode_switching (void)
+{
+ timevar_push (TV_MODE_SWITCH);
+
+ no_new_pseudos = 0;
+ optimize_mode_switching (NULL);
+ no_new_pseudos = 1;
+
+ timevar_pop (TV_MODE_SWITCH);
+}
+#endif
+
+static void
+rest_of_handle_jump (void)
+{
+ ggc_collect ();
+
+ timevar_push (TV_JUMP);
+ open_dump_file (DFI_sibling, current_function_decl);
+
+ /* ??? We may get called either via tree_rest_of_compilation when the CFG
+ is already built or directly (for instance from coverage code).
+ The direct callers shall be updated. */
+ if (!basic_block_info)
+ {
+ init_flow ();
+ rebuild_jump_labels (get_insns ());
+ find_exception_handler_labels ();
+ find_basic_blocks (get_insns (), max_reg_num (), dump_file);
+ }
+
+ delete_unreachable_blocks ();
+#ifdef ENABLE_CHECKING
+ verify_flow_info ();
+#endif
+ timevar_pop (TV_JUMP);
+}
+
+static void
+rest_of_handle_guess_branch_prob (void)
+{
+ /* Turn NOTE_INSN_PREDICTIONs into branch predictions. */
+ if (flag_guess_branch_prob)
+ {
+ timevar_push (TV_BRANCH_PROB);
+ note_prediction_to_br_prob ();
+ timevar_pop (TV_BRANCH_PROB);
+ }
+}
+
+static void
+rest_of_handle_eh (void)
+{
+ insn_locators_initialize ();
+ /* Complete generation of exception handling code. */
+ if (doing_eh (0))
+ {
+ timevar_push (TV_JUMP);
+ open_dump_file (DFI_eh, current_function_decl);
+
+ finish_eh_generation ();
+
+ close_dump_file (DFI_eh, print_rtl, get_insns ());
+ timevar_pop (TV_JUMP);
+ }
+}
+
+
+static void
+rest_of_handle_prologue_epilogue (void)
+{
+ if (optimize && !flow2_completed)
+ cleanup_cfg (CLEANUP_EXPENSIVE);
+
+ /* On some machines, the prologue and epilogue code, or parts thereof,
+ can be represented as RTL. Doing so lets us schedule insns between
+ it and the rest of the code and also allows delayed branch
+ scheduling to operate in the epilogue. */
+ thread_prologue_and_epilogue_insns (get_insns ());
+ epilogue_completed = 1;
+
+ if (optimize && flow2_completed)
+ life_analysis (dump_file, PROP_POSTRELOAD);
+}
+
+static void
+rest_of_handle_stack_adjustments (void)
+{
+ life_analysis (dump_file, PROP_POSTRELOAD);
+ cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE
+ | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0));
+
+ /* This is kind of a heuristic. We need to run combine_stack_adjustments
+ even for machines with possibly nonzero RETURN_POPS_ARGS
+ and ACCUMULATE_OUTGOING_ARGS. We expect that only ports having
+ push instructions will have popping returns. */
+#ifndef PUSH_ROUNDING
+ if (!ACCUMULATE_OUTGOING_ARGS)
+#endif
+ combine_stack_adjustments ();
+}
+
+static void
+rest_of_handle_flow2 (void)
+{
+ timevar_push (TV_FLOW2);
+ open_dump_file (DFI_flow2, current_function_decl);
+
+ /* Re-create the death notes which were deleted during reload. */
+#ifdef ENABLE_CHECKING
+ verify_flow_info ();
+#endif
+
+ /* If optimizing, then go ahead and split insns now. */
+#ifndef STACK_REGS
+ if (optimize > 0)
+#endif
+ split_all_insns (0);
+
+ if (flag_branch_target_load_optimize)
+ rest_of_handle_branch_target_load_optimize ();
+
+ if (!targetm.late_rtl_prologue_epilogue)
+ rest_of_handle_prologue_epilogue ();
+
+ if (optimize)
+ rest_of_handle_stack_adjustments ();
+
+ flow2_completed = 1;
+
+ close_dump_file (DFI_flow2, print_rtl_with_bb, get_insns ());
+ timevar_pop (TV_FLOW2);
+
+ ggc_collect ();
+}
+
+
+static void
+rest_of_handle_jump2 (void)
+{
+ open_dump_file (DFI_jump, current_function_decl);
+
+ /* Always do one jump optimization pass to ensure that JUMP_LABEL fields
+ are initialized and to compute whether control can drop off the end
+ of the function. */
+
+ timevar_push (TV_JUMP);
+ /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB. Do this
+ before jump optimization switches branch directions. */
+ if (flag_guess_branch_prob)
+ expected_value_to_br_prob ();
+
+ delete_trivially_dead_insns (get_insns (), max_reg_num ());
+ reg_scan (get_insns (), max_reg_num (), 0);
+ if (dump_file)
+ dump_flow_info (dump_file);
+ cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP
+ | (flag_thread_jumps ? CLEANUP_THREADING : 0));
+
+ create_loop_notes ();
+
+ purge_line_number_notes (get_insns ());
+
+ if (optimize)
+ cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);
+
+ /* Jump optimization, and the removal of NULL pointer checks, may
+ have reduced the number of instructions substantially. CSE, and
+ future passes, allocate arrays whose dimensions involve the
+ maximum instruction UID, so if we can reduce the maximum UID
+ we'll save big on memory. */
+ renumber_insns (dump_file);
+
+ close_dump_file (DFI_jump, print_rtl_with_bb, get_insns ());
+ timevar_pop (TV_JUMP);
+
+ ggc_collect ();
+}
+
+#ifdef HAVE_peephole2
+static void
+rest_of_handle_peephole2 (void)
+{
+ timevar_push (TV_PEEPHOLE2);
+ open_dump_file (DFI_peephole2, current_function_decl);
+
+ peephole2_optimize (dump_file);
+
+ close_dump_file (DFI_peephole2, print_rtl_with_bb, get_insns ());
+ timevar_pop (TV_PEEPHOLE2);
+}
+#endif
+
+static void
+rest_of_handle_postreload (void)
+{
+ timevar_push (TV_RELOAD_CSE_REGS);
+ open_dump_file (DFI_postreload, current_function_decl);
+
+ /* Do a very simple CSE pass over just the hard registers. */
+ reload_cse_regs (get_insns ());
+ /* reload_cse_regs can eliminate potentially-trapping MEMs.
+ Remove any EH edges associated with them. */
+ if (flag_non_call_exceptions)
+ purge_all_dead_edges (0);
+
+ close_dump_file (DFI_postreload, print_rtl_with_bb, get_insns ());
+ timevar_pop (TV_RELOAD_CSE_REGS);
+}
+
+static void
+rest_of_handle_shorten_branches (void)
+{
+ /* Shorten branches. */
+ timevar_push (TV_SHORTEN_BRANCH);
+ shorten_branches (get_insns ());
+ timevar_pop (TV_SHORTEN_BRANCH);
+}
+
+static void
+rest_of_clean_state (void)
+{
+ coverage_end_function ();
+
+ /* In case the function was not output,
+ don't leave any temporary anonymous types
+ queued up for sdb output. */
+#ifdef SDB_DEBUGGING_INFO
+ if (write_symbols == SDB_DEBUG)
+ sdbout_types (NULL_TREE);
+#endif
+
+ reload_completed = 0;
+ epilogue_completed = 0;
+ flow2_completed = 0;
+ no_new_pseudos = 0;
+
+ timevar_push (TV_FINAL);
+
+ /* Clear out the insn_length contents now that they are no
+ longer valid. */
+ init_insn_lengths ();
+
+ /* Show no temporary slots allocated. */
+ init_temp_slots ();
+
+ free_basic_block_vars ();
+ free_bb_for_insn ();
+
+ timevar_pop (TV_FINAL);
+
+ if (targetm.binds_local_p (current_function_decl))
+ {
+ int pref = cfun->preferred_stack_boundary;
+ if (cfun->recursive_call_emit
+ && cfun->stack_alignment_needed > cfun->preferred_stack_boundary)
+ pref = cfun->stack_alignment_needed;
+ cgraph_rtl_info (current_function_decl)->preferred_incoming_stack_boundary
+ = pref;
+ }
+
+ /* Make sure volatile mem refs aren't considered valid operands for
+ arithmetic insns. We must call this here if this is a nested inline
+ function, since the above code leaves us in the init_recog state
+ (from final.c), and the function context push/pop code does not
+ save/restore volatile_ok.
+
+ ??? Maybe it isn't necessary for expand_start_function to call this
+ anymore if we do it here? */
+
+ init_recog_no_volatile ();
+
+ /* We're done with this function. Free up memory if we can. */
+ free_after_parsing (cfun);
+}
+
+
/* This is called from finish_function (within langhooks.parse_file)
after each top-level definition is parsed.
It is supposed to compile that function or variable
@@ -1343,10 +1715,6 @@ rest_of_compilation (void)
know we want to output it. */
DECL_DEFER_OUTPUT (current_function_decl) = 0;
- /* There's no need to defer outputting this function any more; we
- know we want to output it. */
- DECL_DEFER_OUTPUT (current_function_decl) = 0;
-
/* Register rtl specific functions for cfg. */
rtl_register_cfg_hooks ();
@@ -1354,7 +1722,7 @@ rest_of_compilation (void)
CONCATs anywhere. */
generating_concat_p = 0;
- /* When processing delayed functions, prepare_function_start() won't
+ /* When processing delayed functions, prepare_function_start () won't
have been run to re-initialize it. */
cse_not_expected = ! optimize;
@@ -1391,8 +1759,6 @@ rest_of_compilation (void)
collector to reclaim the memory used by the notes. */
remove_unnecessary_notes ();
- ggc_collect ();
-
/* Initialize some variables used by the optimizers. */
init_function_for_compilation ();
@@ -1403,7 +1769,10 @@ rest_of_compilation (void)
return value of called functions. Also, we can remove all SETs
of subregs of hard registers; they are only here because of
integrate. Also, we can now initialize pseudos intended to
- carry magic hard reg data throughout the function. */
+ carry magic hard reg data throughout the function.
+
+ FIXME: All this looks thoroughly obsolete... maybe we can
+ get rid of both these lines unconditionally? */
rtx_equal_function_value_matters = 0;
purge_hard_subreg_sets (get_insns ());
@@ -1412,49 +1781,13 @@ rest_of_compilation (void)
if (rtl_dump_and_exit || flag_syntax_only || errorcount || sorrycount)
goto exit_rest_of_compilation;
- timevar_push (TV_JUMP);
- open_dump_file (DFI_sibling, current_function_decl);
-
- /* ??? We may get called either via tree_rest_of_compilation when the CFG
- is already built or directly (for instance from coverage code).
- The direct callers shall be updated. */
- if (!basic_block_info)
- {
- init_flow ();
- rebuild_jump_labels (get_insns ());
- find_exception_handler_labels ();
- find_basic_blocks (get_insns (), max_reg_num (), dump_file);
- }
- delete_unreachable_blocks ();
-#ifdef ENABLE_CHECKING
- verify_flow_info();
-#endif
-
- /* Turn NOTE_INSN_PREDICTIONs into branch predictions. */
- if (flag_guess_branch_prob)
- {
- timevar_push (TV_BRANCH_PROB);
- note_prediction_to_br_prob ();
- timevar_pop (TV_BRANCH_PROB);
- }
-
- timevar_pop (TV_JUMP);
+ rest_of_handle_jump ();
+ rest_of_handle_guess_branch_prob ();
if (cfun->tail_call_emit)
fixup_tail_calls ();
- insn_locators_initialize ();
- /* Complete generation of exception handling code. */
- if (doing_eh (0))
- {
- timevar_push (TV_JUMP);
- open_dump_file (DFI_eh, current_function_decl);
-
- finish_eh_generation ();
-
- close_dump_file (DFI_eh, print_rtl, get_insns ());
- timevar_pop (TV_JUMP);
- }
+ rest_of_handle_eh ();
/* Delay emitting hard_reg_initial_value sets until after EH landing pad
generation, which might create new sets. */
@@ -1478,57 +1811,17 @@ rest_of_compilation (void)
at the RTL up to this point must understand that REG_SAVE_AREA
is just like a use of the REG contained inside. */
if (current_function_calls_alloca)
- optimize_save_area_alloca (get_insns ());
+ optimize_save_area_alloca ();
#endif
/* Instantiate all virtual registers. */
instantiate_virtual_regs ();
- open_dump_file (DFI_jump, current_function_decl);
-
- /* Always do one jump optimization pass to ensure that JUMP_LABEL fields
- are initialized and to compute whether control can drop off the end
- of the function. */
-
- timevar_push (TV_JUMP);
- /* Turn NOTE_INSN_EXPECTED_VALUE into REG_BR_PROB. Do this
- before jump optimization switches branch directions. */
- if (flag_guess_branch_prob)
- expected_value_to_br_prob ();
-
- delete_trivially_dead_insns (get_insns (), max_reg_num ());
- reg_scan (get_insns(), max_reg_num (), 0);
- if (dump_file)
- dump_flow_info (dump_file);
- cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) | CLEANUP_PRE_LOOP
- | (flag_thread_jumps ? CLEANUP_THREADING : 0));
-
- create_loop_notes ();
-
- purge_line_number_notes (get_insns ());
-
- close_dump_file (DFI_jump, print_rtl, get_insns ());
-
- if (optimize)
- cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_PRE_LOOP);
-
- /* Jump optimization, and the removal of NULL pointer checks, may
- have reduced the number of instructions substantially. CSE, and
- future passes, allocate arrays whose dimensions involve the
- maximum instruction UID, so if we can reduce the maximum UID
- we'll save big on memory. */
- renumber_insns (dump_file);
- timevar_pop (TV_JUMP);
-
- close_dump_file (DFI_jump, print_rtl_with_bb, get_insns ());
-
- ggc_collect ();
+ rest_of_handle_jump2 ();
if (optimize > 0)
rest_of_handle_cse ();
- ggc_collect ();
-
if (optimize > 0)
{
if (flag_gcse)
@@ -1542,7 +1835,6 @@ rest_of_compilation (void)
}
timevar_push (TV_FLOW);
-
rest_of_handle_cfg ();
if (!flag_tree_based_profiling
@@ -1566,42 +1858,36 @@ rest_of_compilation (void)
if (optimize > 0)
rest_of_handle_if_conversion ();
- if (flag_tracer)
+ if (optimize > 0 && flag_tracer)
rest_of_handle_tracer ();
if (optimize > 0
&& flag_loop_optimize2)
rest_of_handle_loop2 ();
- if (flag_web)
+ if (optimize > 0 && flag_web)
rest_of_handle_web ();
- if (flag_rerun_cse_after_loop)
+ if (optimize > 0 && flag_rerun_cse_after_loop)
rest_of_handle_cse2 ();
cse_not_expected = 1;
rest_of_handle_life ();
+ timevar_pop (TV_FLOW);
if (optimize > 0)
rest_of_handle_combine ();
- if (flag_if_conversion)
+ if (optimize > 0 && flag_if_conversion)
rest_of_handle_if_after_combine ();
/* The optimization to partition hot/cold basic blocks into separate
sections of the .o file does not work well with exception handling.
Don't call it if there are exceptions. */
- if (flag_reorder_blocks_and_partition && !flag_exceptions)
- {
- no_new_pseudos = 0;
- partition_hot_cold_basic_blocks ();
- allocate_reg_life_data ();
- update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
- PROP_LOG_LINKS | PROP_REG_INFO | PROP_DEATH_NOTES);
- no_new_pseudos = 1;
- }
+ if (optimize > 0 && flag_reorder_blocks_and_partition && !flag_exceptions)
+ rest_of_handle_partition_blocks ();
if (optimize > 0 && (flag_regmove || flag_expensive_optimizations))
rest_of_handle_regmove ();
@@ -1611,13 +1897,7 @@ rest_of_compilation (void)
split_all_insns (1);
#ifdef OPTIMIZE_MODE_SWITCHING
- timevar_push (TV_MODE_SWITCH);
-
- no_new_pseudos = 0;
- optimize_mode_switching (NULL);
- no_new_pseudos = 1;
-
- timevar_pop (TV_MODE_SWITCH);
+ rest_of_handle_mode_switching ();
#endif
/* Any of the several passes since flow1 will have munged register
@@ -1626,7 +1906,11 @@ rest_of_compilation (void)
recompute_reg_usage (get_insns (), !optimize_size);
#ifdef INSN_SCHEDULING
- rest_of_handle_sched ();
+ if (optimize > 0 && flag_modulo_sched)
+ rest_of_handle_sms ();
+
+ if (flag_schedule_insns)
+ rest_of_handle_sched ();
#endif
/* Determine if the current function is a leaf before running reload
@@ -1634,9 +1918,6 @@ rest_of_compilation (void)
epilogue thus changing register elimination offsets. */
current_function_is_leaf = leaf_function_p ();
- timevar_push (TV_LOCAL_ALLOC);
- open_dump_file (DFI_lreg, current_function_decl);
-
if (flag_new_regalloc)
{
if (rest_of_handle_new_regalloc ())
@@ -1648,117 +1929,21 @@ rest_of_compilation (void)
goto exit_rest_of_compilation;
}
- ggc_collect ();
-
- open_dump_file (DFI_postreload, current_function_decl);
-
- /* Do a very simple CSE pass over just the hard registers. */
if (optimize > 0)
- {
- timevar_push (TV_RELOAD_CSE_REGS);
- reload_cse_regs (get_insns ());
- /* reload_cse_regs can eliminate potentially-trapping MEMs.
- Remove any EH edges associated with them. */
- if (flag_non_call_exceptions)
- purge_all_dead_edges (0);
- timevar_pop (TV_RELOAD_CSE_REGS);
- }
-
- close_dump_file (DFI_postreload, print_rtl_with_bb, get_insns ());
+ rest_of_handle_postreload ();
if (optimize > 0 && flag_gcse_after_reload)
rest_of_handle_gcse2 ();
- /* Re-create the death notes which were deleted during reload. */
- timevar_push (TV_FLOW2);
- open_dump_file (DFI_flow2, current_function_decl);
-
-#ifdef ENABLE_CHECKING
- verify_flow_info ();
-#endif
-
- /* If optimizing, then go ahead and split get_insns () now. */
-#ifndef STACK_REGS
- if (optimize > 0)
-#endif
- split_all_insns (0);
-
- if (flag_branch_target_load_optimize)
- {
- open_dump_file (DFI_branch_target_load, current_function_decl);
-
- branch_target_load_optimize (/*after_prologue_epilogue_gen=*/false);
-
- close_dump_file (DFI_branch_target_load, print_rtl_with_bb, get_insns ());
-
- ggc_collect ();
- }
-
- if (! targetm.late_rtl_prologue_epilogue)
- {
- if (optimize)
- cleanup_cfg (CLEANUP_EXPENSIVE);
-
- /* On some machines, the prologue and epilogue code, or parts thereof,
- can be represented as RTL. Doing so lets us schedule insns between
- it and the rest of the code and also allows delayed branch
- scheduling to operate in the epilogue. */
- thread_prologue_and_epilogue_insns (get_insns ());
- epilogue_completed = 1;
- }
-
- if (optimize)
- {
- life_analysis (dump_file, PROP_POSTRELOAD);
- cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_UPDATE_LIFE
- | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0));
-
- /* This is kind of a heuristic. We need to run combine_stack_adjustments
- even for machines with possibly nonzero RETURN_POPS_ARGS
- and ACCUMULATE_OUTGOING_ARGS. We expect that only ports having
- push instructions will have popping returns. */
-#ifndef PUSH_ROUNDING
- if (!ACCUMULATE_OUTGOING_ARGS)
-#endif
- combine_stack_adjustments ();
-
- ggc_collect ();
- }
-
- flow2_completed = 1;
-
- close_dump_file (DFI_flow2, print_rtl_with_bb, get_insns ());
- timevar_pop (TV_FLOW2);
+ rest_of_handle_flow2 ();
#ifdef HAVE_peephole2
if (optimize > 0 && flag_peephole2)
- {
- timevar_push (TV_PEEPHOLE2);
- open_dump_file (DFI_peephole2, current_function_decl);
-
- peephole2_optimize (dump_file);
-
- close_dump_file (DFI_peephole2, print_rtl_with_bb, get_insns ());
- timevar_pop (TV_PEEPHOLE2);
- }
+ rest_of_handle_peephole2 ();
#endif
- open_dump_file (DFI_ce3, current_function_decl);
- if (optimize)
- /* Last attempt to optimize CFG, as scheduling, peepholing and insn
- splitting possibly introduced more crossjumping opportunities. */
- cleanup_cfg (CLEANUP_EXPENSIVE
- | CLEANUP_UPDATE_LIFE
- | (flag_crossjumping ? CLEANUP_CROSSJUMP : 0));
- if (flag_if_conversion2)
- {
- timevar_push (TV_IFCVT2);
-
- if_convert (1);
-
- timevar_pop (TV_IFCVT2);
- }
- close_dump_file (DFI_ce3, print_rtl_with_bb, get_insns ());
+ if (optimize > 0)
+ rest_of_handle_if_after_reload ();
if (optimize > 0)
{
@@ -1769,22 +1954,7 @@ rest_of_compilation (void)
}
if (flag_branch_target_load_optimize2)
- {
- /* Leave this a warning for now so that it is possible to experiment
- with running this pass twice. In 3.6, we should either make this
- an error, or use separate dump files. */
- if (flag_branch_target_load_optimize)
- warning ("branch target register load optimization is not intended "
- "to be run twice");
-
- open_dump_file (DFI_branch_target_load, current_function_decl);
-
- branch_target_load_optimize (/*after_prologue_epilogue_gen=*/true);
-
- close_dump_file (DFI_branch_target_load, print_rtl_with_bb, get_insns ());
-
- ggc_collect ();
- }
+ rest_of_handle_branch_target_load_optimize ();
#ifdef LEAF_REGISTERS
current_function_uses_only_leaf_regs
@@ -1792,16 +1962,7 @@ rest_of_compilation (void)
#endif
if (targetm.late_rtl_prologue_epilogue)
- {
- /* On some machines, the prologue and epilogue code, or parts thereof,
- can be represented as RTL. Doing so lets us schedule insns between
- it and the rest of the code and also allows delayed branch
- scheduling to operate in the epilogue. */
- thread_prologue_and_epilogue_insns (get_insns ());
- epilogue_completed = 1;
- if (optimize)
- life_analysis (dump_file, PROP_POSTRELOAD);
- }
+ rest_of_handle_prologue_epilogue ();
#ifdef INSN_SCHEDULING
if (optimize > 0 && flag_schedule_insns_after_reload)
@@ -1827,7 +1988,7 @@ rest_of_compilation (void)
cleanup_barriers ();
#ifdef DELAY_SLOTS
- if (optimize > 0 && flag_delayed_branch)
+ if (flag_delayed_branch)
rest_of_handle_delay_slots ();
#endif
@@ -1839,88 +2000,15 @@ rest_of_compilation (void)
convert_to_eh_region_ranges ();
- /* Shorten branches. */
- timevar_push (TV_SHORTEN_BRANCH);
- shorten_branches (get_insns ());
- timevar_pop (TV_SHORTEN_BRANCH);
+ rest_of_handle_shorten_branches ();
set_nothrow_function_flags ();
- if (current_function_nothrow)
- /* Now we know that this can't throw; set the flag for the benefit
- of other functions later in this translation unit. */
- TREE_NOTHROW (current_function_decl) = 1;
rest_of_handle_final ();
- /* Write DBX symbols if requested. */
-
- /* Note that for those inline functions where we don't initially
- know for certain that we will be generating an out-of-line copy,
- the first invocation of this routine (rest_of_compilation) will
- skip over this code by doing a `goto exit_rest_of_compilation;'.
- Later on, wrapup_global_declarations will (indirectly) call
- rest_of_compilation again for those inline functions that need
- to have out-of-line copies generated. During that call, we
- *will* be routed past here. */
-
- timevar_push (TV_SYMOUT);
- (*debug_hooks->function_decl) (current_function_decl);
- timevar_pop (TV_SYMOUT);
-
exit_rest_of_compilation:
- coverage_end_function ();
-
- /* In case the function was not output,
- don't leave any temporary anonymous types
- queued up for sdb output. */
-#ifdef SDB_DEBUGGING_INFO
- if (write_symbols == SDB_DEBUG)
- sdbout_types (NULL_TREE);
-#endif
-
- reload_completed = 0;
- epilogue_completed = 0;
- flow2_completed = 0;
- no_new_pseudos = 0;
-
- timevar_push (TV_FINAL);
-
- /* Clear out the insn_length contents now that they are no
- longer valid. */
- init_insn_lengths ();
-
- /* Show no temporary slots allocated. */
- init_temp_slots ();
-
- free_basic_block_vars ();
- free_bb_for_insn ();
-
- timevar_pop (TV_FINAL);
-
- if (targetm.binds_local_p (current_function_decl))
- {
- int pref = cfun->preferred_stack_boundary;
- if (cfun->recursive_call_emit
- && cfun->stack_alignment_needed > cfun->preferred_stack_boundary)
- pref = cfun->stack_alignment_needed;
- cgraph_rtl_info (current_function_decl)->preferred_incoming_stack_boundary
- = pref;
- }
-
- /* Make sure volatile mem refs aren't considered valid operands for
- arithmetic insns. We must call this here if this is a nested inline
- function, since the above code leaves us in the init_recog state
- (from final.c), and the function context push/pop code does not
- save/restore volatile_ok.
-
- ??? Maybe it isn't necessary for expand_start_function to call this
- anymore if we do it here? */
-
- init_recog_no_volatile ();
-
- /* We're done with this function. Free up memory if we can. */
- free_after_parsing (cfun);
+ rest_of_clean_state ();
}
void
@@ -1934,23 +2022,18 @@ init_optimization_passes (void)
void
finish_optimization_passes (void)
{
+ timevar_push (TV_DUMP);
if (profile_arc_flag || flag_test_coverage || flag_branch_probabilities)
{
- timevar_push (TV_DUMP);
open_dump_file (DFI_bp, NULL);
-
end_branch_prob ();
-
close_dump_file (DFI_bp, NULL, NULL_RTX);
- timevar_pop (TV_DUMP);
}
if (optimize > 0 && open_dump_file (DFI_combine, NULL))
{
- timevar_push (TV_DUMP);
dump_combine_total_stats (dump_file);
close_dump_file (DFI_combine, NULL, NULL_RTX);
- timevar_pop (TV_DUMP);
}
dump_file = cgraph_dump_file;
@@ -1975,6 +2058,7 @@ finish_optimization_passes (void)
}
}
+ timevar_pop (TV_DUMP);
}
bool