diff options
author | Maxim Kuvyrkov <mkuvyrkov@ispras.ru> | 2006-03-16 05:27:03 +0000 |
---|---|---|
committer | Maxim Kuvyrkov <mkuvyrkov@gcc.gnu.org> | 2006-03-16 05:27:03 +0000 |
commit | 496d7bb03214b7835638fa14d7275e89d3bec954 (patch) | |
tree | 89738976186be03d3235bbf23baa42f8eeb612ca /gcc/sched-ebb.c | |
parent | 63f54b1abd832e2c6f7938aac2e2c455b23c91b7 (diff) | |
download | gcc-496d7bb03214b7835638fa14d7275e89d3bec954.zip gcc-496d7bb03214b7835638fa14d7275e89d3bec954.tar.gz gcc-496d7bb03214b7835638fa14d7275e89d3bec954.tar.bz2 |
target.h (struct spec_info_def): New opaque declaration.
2006-03-16 Maxim Kuvyrkov <mkuvyrkov@ispras.ru>
* target.h (struct spec_info_def): New opaque declaration.
(struct gcc_target.sched): New fields: adjust_cost_2, h_i_d_extended,
speculate_insn, needs_block_p, gen_check,
first_cycle_multipass_dfa_lookahead_guard_spec, set_sched_flags.
* target-def.h (TARGET_SCHED_ADJUST_COST_2,
TARGET_SCHED_H_I_D_EXTENDED, TARGET_SCHED_SPECULATE_INSN,
TARGET_SCHED_NEEDS_BLOCK_P, TARGET_SCHED_GEN_CHECK,
TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC,
TARGET_SCHED_SET_SCHED_FLAGS): New macros to initialize fields in
gcc_target.sched.
(TARGET_SCHED): Use new macros.
* rtl.h (copy_DEPS_LIST_list): New prototype.
* sched-int.h (struct sched_info): Change signature of new_ready field,
adjust all initializations. New fields: add_remove_insn,
begin_schedule_ready, add_block, advance_target_bb, fix_recovery_cfg,
region_head_or_leaf_p.
(struct spec_info_def): New structure declaration.
(spec_info_t): New typedef.
(struct haifa_insn_data): New fields: todo_spec, done_spec, check_spec,
recovery_block, orig_pat.
(glat_start, glat_end): New variable declarations.
(TODO_SPEC, DONE_SPEC, CHECK_SPEC, RECOVERY_BLOCK, ORIG_PAT):
New access macros.
(enum SCHED_FLAGS): New constants: SCHED_RGN, SCHED_EBB,
DETACH_LIFE_INFO, USE_GLAT.
(enum SPEC_SCHED_FLAGS): New enumeration.
(NOTE_NOTE_BB_P): New macro.
(extend_dependency_caches, xrecalloc, unlink_bb_notes, add_block,
attach_life_info, debug_spec_status, check_reg_live): New functions.
(get_block_head_tail): Change signature to get_ebb_head_tail, adjust
all uses in ddg.c, modulo-sched.c, haifa-sched.c, sched-rgn.c,
sched-ebb.c
(get_dep_weak, ds_merge): Prototype functions from sched-deps.c.
* ddg.c (get_block_head_tail): Adjust all uses.
* modulo-sched.c (get_block_head_tail): Adjust all uses.
(sms_sched_info): Initialize new fields.
(contributes_to_priority): Removed.
* haifa-sched.c (params.h): New include.
(get_block_head_tail): Adjust all uses.
(ISSUE_POINTS): New macro.
(glat_start, glat_end): New global variables.
(spec_info_var, spec_info, added_recovery_block_p, nr_begin_data,
nr_be_in_data, nr_begin_control, nr_be_in_control, bb_header,
old_last_basic_block, before_recovery, current_sched_info_var,
rgn_n_insns, luid): New static variables.
(insn_cost1): New function. Move logic from insn_cost to here.
(find_insn_reg_weight1): New function. Move logic from
find_insn_reg_weight to here.
(reemit_notes, move_insn, max_issue): Change signature.
(move_insn1): Removed.
(extend_h_i_d, extend_ready, extend_global, extend_all, init_h_i_d,
extend_bb): New static functions to support extension of scheduler's
data structures.
(generate_recovery_code, process_insn_depend_be_in_spec,
begin_speculative_block, add_to_speculative_block,
init_before_recovery, create_recovery_block, create_check_block_twin,
fix_recovery_deps): New static functions to support
generation of recovery code.
(fix_jump_move, find_fallthru_edge, dump_new_block_header,
restore_bb_notes, move_block_after_check, move_succs): New static
functions to support ebb scheduling.
(init_glat, init_glat1, attach_life_info1, free_glat): New static
functions to support handling of register live information.
(associate_line_notes_with_blocks, change_pattern, speculate_insn,
sched_remove_insn, clear_priorities, calc_priorities, bb_note,
add_jump_dependencies): New static functions.
(check_cfg, has_edge_p, check_sched_flags): New static functions for
consistency checking.
(debug_spec_status): New function to call from debugger.
(priority): Added code to handle speculation checks.
(rank_for_schedule): Added code to distinguish speculative instructions.
(schedule_insn): Added code to handle speculation checks.
(unlink_other_notes, rm_line_notes, restore_line_notes, rm_other_notes):
Fixed to handle ebbs.
(move_insn): Added code to handle ebb scheduling.
(max_issue): Added code to use ISSUE_POINTS of instructions.
(choose_ready): Added code to choose between speculative and
non-speculative instructions.
(schedule_block): Added code to handle ebb scheduling and scheduling of
speculative instructions.
(sched_init): Initialize new variables.
(sched_finish): Free new variables. Print statistics.
(try_ready): Added code to handle speculative instructions.
* lists.c (copy_DEPS_LIST_list): New function.
* sched-deps.c (extend_dependency_caches): New function. Move logic
from create_dependency_caches to here.
(get_dep_weak, ds_merge): Make global.
* genattr.c (main): Code to output prototype for
dfa_clear_single_insn_cache.
* genautomata.c (DFA_CLEAR_SINGLE_INSN_CACHE_FUNC_NAME): New macros.
(output_dfa_clean_insn_cache_func): Code to output
dfa_clear_single_insn_cache function.
* sched-ebb.c (target_n_insns): Remove. Adjust all users to use
n_insns.
(can_schedule_ready_p, fix_basic_block_boundaries, add_missing_bbs):
Removed.
(n_insns, dont_calc_deps, ebb_head, ebb_tail, last_bb):
New static variables.
(begin_schedule_ready, add_remove_insn, add_block1, advance_target_bb,
fix_recovery_cfg, ebb_head_or_leaf_p): Implement hooks from
struct sched_info.
(ebb_sched_info): Initialize new fields.
(get_block_head_tail): Adjust all uses.
(compute_jump_reg_dependencies): Fixed to use glat_start.
(schedule_ebb): Code to remove unreachable last block.
(schedule_ebbs): Added code to update register live information.
* sched-rgn.c (region_sched_info): Initialize new fields.
(get_block_head_tail): Adjust all uses.
(last_was_jump): Removed. Adjust users.
(begin_schedule_ready, add_remove_insn, insn_points, extend_regions,
add_block1, fix_recovery_cfg, advance_target_bb, region_head_or_leaf_p):
Implement new hooks.
(check_dead_notes1): New static function.
(struct region): New fields: dont_calc_deps, has_real_ebb.
(RGN_DONT_CALC_DEPS, RGN_HAS_REAL_EBB): New access macros.
(BB_TO_BLOCK): Fixed to handle EBBs.
(EBB_FIRST_BB, EBB_LAST_BB): New macros.
(ebb_head): New static variable.
(debug_regions, contributes_to_priority): Fixed to handle EBBs.
(find_single_block_regions, find_rgns, find_more_rgns): Initialize
new fields.
(compute_dom_prob_ps): New assertion.
(check_live_1, update_live_1): Fixed to work with glat_start instead of
global_live_at_start.
(init_ready_list): New assertions.
(can_schedule_ready_p): Split update code to begin_schedule_ready.
(new_ready): Add support for BEGIN_CONTROL speculation.
(schedule_insns): Fixed code that updates register live information
to handle EBBs.
(schedule_region): Fixed to handle EBBs.
(init_regions): Use extend_regions and check_dead_notes1.
* params.def (PARAM_MAX_SCHED_INSN_CONFLICT_DELAY,
PARAM_SCHED_SPEC_PROB_CUTOFF): New parameters.
* doc/tm.texi (TARGET_SCHED_ADJUST_COST_2, TARGET_SCHED_H_I_D_EXTENDED,
TARGET_SCHED_SPECULATE_INSN, TARGET_SCHED_NEEDS_BLOCK_P,
TARGET_SCHED_GEN_CHECK,
TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC,
TARGET_SCHED_SET_SCHED_FLAGS): Document.
* doc/invoke.texi (max-sched-insn-conflict-delay,
sched-spec-prob-cutoff): Document.
From-SVN: r112128
Diffstat (limited to 'gcc/sched-ebb.c')
-rw-r--r-- | gcc/sched-ebb.c | 463 |
1 files changed, 289 insertions, 174 deletions
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c index 0b2e1bf1..4126a5d 100644 --- a/gcc/sched-ebb.c +++ b/gcc/sched-ebb.c @@ -43,14 +43,23 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA #include "target.h" #include "output.h" -/* The number of insns to be scheduled in total. */ -static int target_n_insns; /* The number of insns scheduled so far. */ static int sched_n_insns; +/* The number of insns to be scheduled in total. */ +static int n_insns; + +/* Set of blocks, that already have their dependencies calculated. */ +static bitmap_head dont_calc_deps; +/* Set of basic blocks, that are ebb heads of tails respectively. */ +static bitmap_head ebb_head, ebb_tail; + +/* Last basic block in current ebb. */ +static basic_block last_bb; + /* Implementations of the sched_info functions for region scheduling. */ static void init_ready_list (void); -static int can_schedule_ready_p (rtx); +static void begin_schedule_ready (rtx, rtx); static int schedule_more_p (void); static const char *ebb_print_insn (rtx, int); static int rank (rtx, rtx); @@ -59,16 +68,22 @@ static void compute_jump_reg_dependencies (rtx, regset, regset, regset); static basic_block earliest_block_with_similiar_load (basic_block, rtx); static void add_deps_for_risky_insns (rtx, rtx); static basic_block schedule_ebb (rtx, rtx); -static basic_block fix_basic_block_boundaries (basic_block, basic_block, rtx, - rtx); -static void add_missing_bbs (rtx, basic_block, basic_block); + +static void add_remove_insn (rtx, int); +static void add_block1 (basic_block, basic_block); +static basic_block advance_target_bb (basic_block, rtx); +static void fix_recovery_cfg (int, int, int); + +#ifdef ENABLE_CHECKING +static int ebb_head_or_leaf_p (basic_block, int); +#endif /* Return nonzero if there are more insns that should be scheduled. 
*/ static int schedule_more_p (void) { - return sched_n_insns < target_n_insns; + return sched_n_insns < n_insns; } /* Add all insns that are initially ready to the ready list READY. Called @@ -77,11 +92,11 @@ schedule_more_p (void) static void init_ready_list (void) { + int n = 0; rtx prev_head = current_sched_info->prev_head; rtx next_tail = current_sched_info->next_tail; rtx insn; - target_n_insns = 0; sched_n_insns = 0; #if 0 @@ -95,18 +110,74 @@ init_ready_list (void) for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn)) { try_ready (insn); - target_n_insns++; + n++; } -} -/* Called after taking INSN from the ready list. Returns nonzero if this - insn can be scheduled, nonzero if we should silently discard it. */ + gcc_assert (n == n_insns); +} -static int -can_schedule_ready_p (rtx insn ATTRIBUTE_UNUSED) +/* INSN is being scheduled after LAST. Update counters. */ +static void +begin_schedule_ready (rtx insn, rtx last) { sched_n_insns++; - return 1; + + if (BLOCK_FOR_INSN (insn) == last_bb + /* INSN is a jump in the last block, ... */ + && control_flow_insn_p (insn) + /* that is going to be moved over some instructions. */ + && last != PREV_INSN (insn)) + { + edge e; + edge_iterator ei; + basic_block bb; + + /* An obscure special case, where we do have partially dead + instruction scheduled after last control flow instruction. + In this case we can create new basic block. It is + always exactly one basic block last in the sequence. 
*/ + + FOR_EACH_EDGE (e, ei, last_bb->succs) + if (e->flags & EDGE_FALLTHRU) + break; + +#ifdef ENABLE_CHECKING + gcc_assert (!e || !(e->flags & EDGE_COMPLEX)); + + gcc_assert (BLOCK_FOR_INSN (insn) == last_bb + && !RECOVERY_BLOCK (insn) + && BB_HEAD (last_bb) != insn + && BB_END (last_bb) == insn); + + { + rtx x; + + x = NEXT_INSN (insn); + if (e) + gcc_assert (NOTE_P (x) || LABEL_P (x)); + else + gcc_assert (BARRIER_P (x)); + } +#endif + + if (e) + { + bb = split_edge (e); + gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb))); + } + else + bb = create_basic_block (insn, 0, last_bb); + + /* split_edge () creates BB before E->DEST. Keep in mind, that + this operation extends scheduling region till the end of BB. + Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out + of the scheduling region. */ + current_sched_info->next_tail = NEXT_INSN (BB_END (bb)); + gcc_assert (current_sched_info->next_tail); + + add_block (bb, last_bb); + gcc_assert (last_bb == bb); + } } /* Return a string that contains the insn uid and optionally anything else @@ -173,9 +244,9 @@ compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used, it may guard the fallthrough block from using a value that has conditionally overwritten that of the main codepath. So we consider that it restores the value of the main codepath. 
*/ - bitmap_and (set, e->dest->il.rtl->global_live_at_start, cond_set); + bitmap_and (set, glat_start [e->dest->index], cond_set); else - bitmap_ior_into (used, e->dest->il.rtl->global_live_at_start); + bitmap_ior_into (used, glat_start [e->dest->index]); } /* Used in schedule_insns to initialize current_sched_info for scheduling @@ -184,7 +255,7 @@ compute_jump_reg_dependencies (rtx insn, regset cond_set, regset used, static struct sched_info ebb_sched_info = { init_ready_list, - can_schedule_ready_p, + NULL, schedule_more_p, NULL, rank, @@ -196,143 +267,19 @@ static struct sched_info ebb_sched_info = NULL, NULL, 0, 1, 0, - 0 + add_remove_insn, + begin_schedule_ready, + add_block1, + advance_target_bb, + fix_recovery_cfg, +#ifdef ENABLE_CHECKING + ebb_head_or_leaf_p, +#endif + /* We need to DETACH_LIVE_INFO to be able to create new basic blocks. + See begin_schedule_ready (). */ + SCHED_EBB | USE_GLAT | DETACH_LIFE_INFO }; -/* It is possible that ebb scheduling eliminated some blocks. - Place blocks from FIRST to LAST before BEFORE. */ - -static void -add_missing_bbs (rtx before, basic_block first, basic_block last) -{ - for (; last != first->prev_bb; last = last->prev_bb) - { - before = emit_note_before (NOTE_INSN_BASIC_BLOCK, before); - NOTE_BASIC_BLOCK (before) = last; - BB_HEAD (last) = before; - BB_END (last) = before; - update_bb_for_insn (last); - } -} - -/* Fixup the CFG after EBB scheduling. Re-recognize the basic - block boundaries in between HEAD and TAIL and update basic block - structures between BB and LAST. */ - -static basic_block -fix_basic_block_boundaries (basic_block bb, basic_block last, rtx head, - rtx tail) -{ - rtx insn = head; - rtx last_inside = BB_HEAD (bb); - rtx aftertail = NEXT_INSN (tail); - - head = BB_HEAD (bb); - - for (; insn != aftertail; insn = NEXT_INSN (insn)) - { - gcc_assert (!LABEL_P (insn)); - /* Create new basic blocks just before first insn. 
*/ - if (inside_basic_block_p (insn)) - { - if (!last_inside) - { - rtx note; - - /* Re-emit the basic block note for newly found BB header. */ - if (LABEL_P (insn)) - { - note = emit_note_after (NOTE_INSN_BASIC_BLOCK, insn); - head = insn; - last_inside = note; - } - else - { - note = emit_note_before (NOTE_INSN_BASIC_BLOCK, insn); - head = note; - last_inside = insn; - } - } - else - last_inside = insn; - } - /* Control flow instruction terminate basic block. It is possible - that we've eliminated some basic blocks (made them empty). - Find the proper basic block using BLOCK_FOR_INSN and arrange things in - a sensible way by inserting empty basic blocks as needed. */ - if (control_flow_insn_p (insn) || (insn == tail && last_inside)) - { - basic_block curr_bb = BLOCK_FOR_INSN (insn); - rtx note; - - if (!control_flow_insn_p (insn)) - curr_bb = last; - if (bb == last->next_bb) - { - edge f; - rtx h; - edge_iterator ei; - - /* An obscure special case, where we do have partially dead - instruction scheduled after last control flow instruction. - In this case we can create new basic block. It is - always exactly one basic block last in the sequence. Handle - it by splitting the edge and repositioning the block. - This is somewhat hackish, but at least avoid cut&paste - - A safer solution can be to bring the code into sequence, - do the split and re-emit it back in case this will ever - trigger problem. */ - - FOR_EACH_EDGE (f, ei, bb->prev_bb->succs) - if (f->flags & EDGE_FALLTHRU) - break; - - if (f) - { - last = curr_bb = split_edge (f); - h = BB_HEAD (curr_bb); - BB_HEAD (curr_bb) = head; - BB_END (curr_bb) = insn; - /* Edge splitting created misplaced BASIC_BLOCK note, kill - it. */ - delete_insn (h); - } - /* It may happen that code got moved past unconditional jump in - case the code is completely dead. Kill it. 
*/ - else - { - rtx next = next_nonnote_insn (insn); - delete_insn_chain (head, insn); - /* We keep some notes in the way that may split barrier from the - jump. */ - if (BARRIER_P (next)) - { - emit_barrier_after (prev_nonnote_insn (head)); - delete_insn (next); - } - insn = NULL; - } - } - else - { - BB_HEAD (curr_bb) = head; - BB_END (curr_bb) = insn; - add_missing_bbs (BB_HEAD (curr_bb), bb, curr_bb->prev_bb); - } - note = LABEL_P (head) ? NEXT_INSN (head) : head; - NOTE_BASIC_BLOCK (note) = curr_bb; - update_bb_for_insn (curr_bb); - bb = curr_bb->next_bb; - last_inside = NULL; - if (!insn) - break; - } - } - add_missing_bbs (BB_HEAD (last->next_bb), bb, last); - return bb->prev_bb; -} - /* Returns the earliest block in EBB currently being processed where a "similar load" 'insn2' is found, and hence LOAD_INSN can move speculatively into the found block. All the following must hold: @@ -488,29 +435,40 @@ add_deps_for_risky_insns (rtx head, rtx tail) static basic_block schedule_ebb (rtx head, rtx tail) { - int n_insns; - basic_block b; + basic_block first_bb, target_bb; struct deps tmp_deps; - basic_block first_bb = BLOCK_FOR_INSN (head); - basic_block last_bb = BLOCK_FOR_INSN (tail); + + first_bb = BLOCK_FOR_INSN (head); + last_bb = BLOCK_FOR_INSN (tail); if (no_real_insns_p (head, tail)) return BLOCK_FOR_INSN (tail); - init_deps_global (); + gcc_assert (INSN_P (head) && INSN_P (tail)); + + if (!bitmap_bit_p (&dont_calc_deps, first_bb->index)) + { + init_deps_global (); - /* Compute LOG_LINKS. */ - init_deps (&tmp_deps); - sched_analyze (&tmp_deps, head, tail); - free_deps (&tmp_deps); + /* Compute LOG_LINKS. */ + init_deps (&tmp_deps); + sched_analyze (&tmp_deps, head, tail); + free_deps (&tmp_deps); - /* Compute INSN_DEPEND. */ - compute_forward_dependences (head, tail); + /* Compute INSN_DEPEND. 
*/ + compute_forward_dependences (head, tail); - add_deps_for_risky_insns (head, tail); + add_deps_for_risky_insns (head, tail); - if (targetm.sched.dependencies_evaluation_hook) - targetm.sched.dependencies_evaluation_hook (head, tail); + if (targetm.sched.dependencies_evaluation_hook) + targetm.sched.dependencies_evaluation_hook (head, tail); + + finish_deps_global (); + } + else + /* Only recovery blocks can have their dependencies already calculated, + and they always are single block ebbs. */ + gcc_assert (first_bb == last_bb); /* Set priorities. */ current_sched_info->sched_max_insns_priority = 0; @@ -546,10 +504,16 @@ schedule_ebb (rtx head, rtx tail) schedule_block (). */ rm_other_notes (head, tail); + unlink_bb_notes (first_bb, last_bb); + current_sched_info->queue_must_finish_empty = 1; - schedule_block (-1, n_insns); + target_bb = first_bb; + schedule_block (&target_bb, n_insns); + /* We might pack all instructions into fewer blocks, + so we may made some of them empty. Can't assert (b == last_bb). */ + /* Sanity check: verify that all region insns were scheduled. */ gcc_assert (sched_n_insns == n_insns); head = current_sched_info->head; @@ -557,10 +521,17 @@ schedule_ebb (rtx head, rtx tail) if (write_symbols != NO_DEBUG) restore_line_notes (head, tail); - b = fix_basic_block_boundaries (first_bb, last_bb, head, tail); - finish_deps_global (); - return b; + if (EDGE_COUNT (last_bb->preds) == 0) + /* LAST_BB is unreachable. */ + { + gcc_assert (first_bb != last_bb + && EDGE_COUNT (last_bb->succs) == 0); + last_bb = last_bb->prev_bb; + delete_basic_block (last_bb->next_bb); + } + + return last_bb; } /* The one entry point in this file. 
*/ @@ -570,6 +541,9 @@ schedule_ebbs (void) { basic_block bb; int probability_cutoff; + rtx tail; + sbitmap large_region_blocks, blocks; + int any_large_regions; if (profile_info && flag_branch_probabilities) probability_cutoff = PARAM_VALUE (TRACER_MIN_BRANCH_PROBABILITY_FEEDBACK); @@ -590,11 +564,18 @@ schedule_ebbs (void) compute_bb_for_insn (); + /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers. */ + bitmap_initialize (&dont_calc_deps, 0); + bitmap_clear (&dont_calc_deps); + bitmap_initialize (&ebb_head, 0); + bitmap_clear (&ebb_head); + bitmap_initialize (&ebb_tail, 0); + bitmap_clear (&ebb_tail); + /* Schedule every region in the subroutine. */ FOR_EACH_BB (bb) { rtx head = BB_HEAD (bb); - rtx tail; for (;;) { @@ -628,11 +609,71 @@ schedule_ebbs (void) break; } + bitmap_set_bit (&ebb_head, BLOCK_NUM (head)); bb = schedule_ebb (head, tail); + bitmap_set_bit (&ebb_tail, bb->index); } + bitmap_clear (&dont_calc_deps); - /* Updating life info can be done by local propagation over the modified - superblocks. */ + gcc_assert (current_sched_info->flags & DETACH_LIFE_INFO); + /* We can create new basic blocks during scheduling, and + attach_life_info () will create regsets for them + (along with attaching existing info back). */ + attach_life_info (); + + /* Updating register live information. */ + allocate_reg_life_data (); + + any_large_regions = 0; + large_region_blocks = sbitmap_alloc (last_basic_block); + sbitmap_zero (large_region_blocks); + FOR_EACH_BB (bb) + SET_BIT (large_region_blocks, bb->index); + + blocks = sbitmap_alloc (last_basic_block); + sbitmap_zero (blocks); + + /* Update life information. For regions consisting of multiple blocks + we've possibly done interblock scheduling that affects global liveness. + For regions consisting of single blocks we need to do only local + liveness. */ + FOR_EACH_BB (bb) + { + int bbi; + + bbi = bb->index; + + if (!bitmap_bit_p (&ebb_head, bbi) + || !bitmap_bit_p (&ebb_tail, bbi) + /* New blocks (e.g. 
recovery blocks) should be processed + as parts of large regions. */ + || !glat_start[bbi]) + any_large_regions = 1; + else + { + SET_BIT (blocks, bbi); + RESET_BIT (large_region_blocks, bbi); + } + } + + update_life_info (blocks, UPDATE_LIFE_LOCAL, 0); + sbitmap_free (blocks); + + if (any_large_regions) + { + update_life_info (large_region_blocks, UPDATE_LIFE_GLOBAL, 0); + +#ifdef ENABLE_CHECKING + /* !!! We can't check reg_live_info here because of the fact, + that destination registers of COND_EXEC's may be dead + before scheduling (while they should be alive). Don't know why. */ + /*check_reg_live ();*/ +#endif + } + sbitmap_free (large_region_blocks); + + bitmap_clear (&ebb_head); + bitmap_clear (&ebb_tail); /* Reposition the prologue and epilogue notes in case we moved the prologue/epilogue insns. */ @@ -644,3 +685,77 @@ schedule_ebbs (void) sched_finish (); } + +/* INSN has been added to/removed from current ebb. */ +static void +add_remove_insn (rtx insn ATTRIBUTE_UNUSED, int remove_p) +{ + if (!remove_p) + n_insns++; + else + n_insns--; +} + +/* BB was added to ebb after AFTER. */ +static void +add_block1 (basic_block bb, basic_block after) +{ + /* Recovery blocks are always bounded by BARRIERS, + therefore, they always form single block EBB, + therefore, we can use rec->index to identify such EBBs. */ + if (after == EXIT_BLOCK_PTR) + bitmap_set_bit (&dont_calc_deps, bb->index); + else if (after == last_bb) + last_bb = bb; +} + +/* Return next block in ebb chain. For parameter meaning please refer to + sched-int.h: struct sched_info: advance_target_bb. 
*/ +static basic_block +advance_target_bb (basic_block bb, rtx insn) +{ + if (insn) + { + if (BLOCK_FOR_INSN (insn) != bb + && control_flow_insn_p (insn) + && !RECOVERY_BLOCK (insn) + && !RECOVERY_BLOCK (BB_END (bb))) + { + gcc_assert (!control_flow_insn_p (BB_END (bb)) + && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb))); + return bb; + } + else + return 0; + } + else if (bb != last_bb) + return bb->next_bb; + else + gcc_unreachable (); +} + +/* Fix internal data after interblock movement of jump instruction. + For parameter meaning please refer to + sched-int.h: struct sched_info: fix_recovery_cfg. */ +static void +fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi, int jump_bb_nexti) +{ + gcc_assert (last_bb->index != bbi); + + if (jump_bb_nexti == last_bb->index) + last_bb = BASIC_BLOCK (jump_bbi); +} + +#ifdef ENABLE_CHECKING +/* Return non zero, if BB is first or last (depending of LEAF_P) block in + current ebb. For more information please refer to + sched-int.h: struct sched_info: region_head_or_leaf_p. */ +static int +ebb_head_or_leaf_p (basic_block bb, int leaf_p) +{ + if (!leaf_p) + return bitmap_bit_p (&ebb_head, bb->index); + else + return bitmap_bit_p (&ebb_tail, bb->index); +} +#endif /* ENABLE_CHECKING */ |