diff options
author | Richard Henderson <rth@redhat.com> | 2002-01-26 20:46:53 -0800 |
---|---|---|
committer | Richard Henderson <rth@gcc.gnu.org> | 2002-01-26 20:46:53 -0800 |
commit | 37a0f8a5254bdcdd68eb8fb711acf090c5adc97c (patch) | |
tree | 0550590bfcb89e31ecc9e480084d2d4bceb518e9 /gcc/sched-rgn.c | |
parent | cea3bd3e5a0a40eb90809bf90063da4911ba23b0 (diff) | |
download | gcc-37a0f8a5254bdcdd68eb8fb711acf090c5adc97c.zip gcc-37a0f8a5254bdcdd68eb8fb711acf090c5adc97c.tar.gz gcc-37a0f8a5254bdcdd68eb8fb711acf090c5adc97c.tar.bz2 |
sched-deps.c (reg_pending_uses_head): New.
* sched-deps.c (reg_pending_uses_head): New.
(reg_pending_barrier): Rename from reg_pending_sets_all.
(find_insn_list): Don't mark inline.
(find_insn_mem_list): Remove.
(add_dependence_list, add_dependence_list_and_free): New.
(flush_pending_lists): Replace only_write param with separate
for_read and for_write parameters. Update all callers. Use
add_dependence_list_and_free.
(sched_analyze_1): Do not add reg dependencies here; just set
the pending bits. Use add_dependence_list.
(sched_analyze_2): Likewise.
(sched_analyze_insn): Replace schedule_barrier_found with
reg_pending_barrier. Add all dependencies for pending reg
uses, sets, and clobbers.
(sched_analyze): Don't add reg dependencies for calls, just
set pending bits. Use regs_invalidated_by_call. Treat
sched_before_next_call as a normal list, not a fake insn.
(init_deps): No funny init for sched_before_next_call.
(free_deps): Free pending mems lists. Don't zero reg_last.
(init_deps_global): Init reg_pending_uses.
(finish_deps_global): Free it.
* sched-int.h (deps): Make in_post_call_group_p boolean. Update docs.
(find_insn_mem_list): Remove.
* sched-rgn.c (concat_INSN_LIST, concat_insn_mem_list): New.
(propagate_deps): Use them. Zero temp mem lists.
From-SVN: r49262
Diffstat (limited to 'gcc/sched-rgn.c')
-rw-r--r-- | gcc/sched-rgn.c | 216 |
1 file changed, 107 insertions, 109 deletions
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c index 9a7277a..b7193aa 100644 --- a/gcc/sched-rgn.c +++ b/gcc/sched-rgn.c @@ -300,6 +300,8 @@ void debug_dependencies PARAMS ((void)); static void init_regions PARAMS ((void)); static void schedule_region PARAMS ((int)); +static rtx concat_INSN_LIST PARAMS ((rtx, rtx)); +static void concat_insn_mem_list PARAMS ((rtx, rtx, rtx *, rtx *)); static void propagate_deps PARAMS ((int, struct deps *)); static void free_pending_lists PARAMS ((void)); @@ -2299,8 +2301,7 @@ add_branch_dependences (head, tail) { if (GET_CODE (insn) != NOTE) { - if (last != 0 - && !find_insn_list (insn, LOG_LINKS (last))) + if (last != 0 && !find_insn_list (insn, LOG_LINKS (last))) { add_dependence (last, insn, REG_DEP_ANTI); INSN_REF_COUNT (insn)++; @@ -2356,125 +2357,122 @@ add_branch_dependences (head, tail) static struct deps *bb_deps; +/* Duplicate the INSN_LIST elements of COPY and prepend them to OLD. */ + +static rtx +concat_INSN_LIST (copy, old) + rtx copy, old; +{ + rtx new = old; + for (; copy ; copy = XEXP (copy, 1)) + new = alloc_INSN_LIST (XEXP (copy, 0), new); + return new; +} + +static void +concat_insn_mem_list (copy_insns, copy_mems, old_insns_p, old_mems_p) + rtx copy_insns, copy_mems; + rtx *old_insns_p, *old_mems_p; +{ + rtx new_insns = *old_insns_p; + rtx new_mems = *old_mems_p; + + while (copy_insns) + { + new_insns = alloc_INSN_LIST (XEXP (copy_insns, 0), new_insns); + new_mems = alloc_EXPR_LIST (VOIDmode, XEXP (copy_mems, 0), new_mems); + copy_insns = XEXP (copy_insns, 1); + copy_mems = XEXP (copy_mems, 1); + } + + *old_insns_p = new_insns; + *old_mems_p = new_mems; +} + /* After computing the dependencies for block BB, propagate the dependencies found in TMP_DEPS to the successors of the block. 
*/ static void -propagate_deps (bb, tmp_deps) +propagate_deps (bb, pred_deps) int bb; - struct deps *tmp_deps; + struct deps *pred_deps; { int b = BB_TO_BLOCK (bb); int e, first_edge; - int reg; - rtx link_insn, link_mem; - rtx u; - - /* These lists should point to the right place, for correct - freeing later. */ - bb_deps[bb].pending_read_insns = tmp_deps->pending_read_insns; - bb_deps[bb].pending_read_mems = tmp_deps->pending_read_mems; - bb_deps[bb].pending_write_insns = tmp_deps->pending_write_insns; - bb_deps[bb].pending_write_mems = tmp_deps->pending_write_mems; /* bb's structures are inherited by its successors. */ first_edge = e = OUT_EDGES (b); - if (e <= 0) - return; - - do - { - rtx x; - int b_succ = TO_BLOCK (e); - int bb_succ = BLOCK_TO_BB (b_succ); - struct deps *succ_deps = bb_deps + bb_succ; - - /* Only bbs "below" bb, in the same region, are interesting. */ - if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ) - || bb_succ <= bb) - { - e = NEXT_OUT (e); - continue; - } - - /* The reg_last lists are inherited by bb_succ. */ - EXECUTE_IF_SET_IN_REG_SET (&tmp_deps->reg_last_in_use, 0, reg, - { - struct deps_reg *tmp_deps_reg = &tmp_deps->reg_last[reg]; - struct deps_reg *succ_deps_reg = &succ_deps->reg_last[reg]; - - for (u = tmp_deps_reg->uses; u; u = XEXP (u, 1)) - if (! find_insn_list (XEXP (u, 0), succ_deps_reg->uses)) - succ_deps_reg->uses - = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->uses); - - for (u = tmp_deps_reg->sets; u; u = XEXP (u, 1)) - if (! find_insn_list (XEXP (u, 0), succ_deps_reg->sets)) - succ_deps_reg->sets - = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->sets); - - for (u = tmp_deps_reg->clobbers; u; u = XEXP (u, 1)) - if (! find_insn_list (XEXP (u, 0), succ_deps_reg->clobbers)) - succ_deps_reg->clobbers - = alloc_INSN_LIST (XEXP (u, 0), succ_deps_reg->clobbers); - }); - IOR_REG_SET (&succ_deps->reg_last_in_use, &tmp_deps->reg_last_in_use); - - /* Mem read/write lists are inherited by bb_succ. 
*/ - link_insn = tmp_deps->pending_read_insns; - link_mem = tmp_deps->pending_read_mems; - while (link_insn) - { - if (!(find_insn_mem_list (XEXP (link_insn, 0), - XEXP (link_mem, 0), - succ_deps->pending_read_insns, - succ_deps->pending_read_mems))) - add_insn_mem_dependence (succ_deps, &succ_deps->pending_read_insns, - &succ_deps->pending_read_mems, - XEXP (link_insn, 0), XEXP (link_mem, 0)); - link_insn = XEXP (link_insn, 1); - link_mem = XEXP (link_mem, 1); - } + if (e > 0) + do + { + int b_succ = TO_BLOCK (e); + int bb_succ = BLOCK_TO_BB (b_succ); + struct deps *succ_deps = bb_deps + bb_succ; + int reg; + + /* Only bbs "below" bb, in the same region, are interesting. */ + if (CONTAINING_RGN (b) != CONTAINING_RGN (b_succ) + || bb_succ <= bb) + { + e = NEXT_OUT (e); + continue; + } - link_insn = tmp_deps->pending_write_insns; - link_mem = tmp_deps->pending_write_mems; - while (link_insn) - { - if (!(find_insn_mem_list (XEXP (link_insn, 0), - XEXP (link_mem, 0), - succ_deps->pending_write_insns, - succ_deps->pending_write_mems))) - add_insn_mem_dependence (succ_deps, - &succ_deps->pending_write_insns, - &succ_deps->pending_write_mems, - XEXP (link_insn, 0), XEXP (link_mem, 0)); - - link_insn = XEXP (link_insn, 1); - link_mem = XEXP (link_mem, 1); - } + /* The reg_last lists are inherited by bb_succ. */ + EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, + { + struct deps_reg *pred_rl = &pred_deps->reg_last[reg]; + struct deps_reg *succ_rl = &succ_deps->reg_last[reg]; + + succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses); + succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets); + succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers, + succ_rl->clobbers); + }); + IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use); + + /* Mem read/write lists are inherited by bb_succ. 
*/ + concat_insn_mem_list (pred_deps->pending_read_insns, + pred_deps->pending_read_mems, + &succ_deps->pending_read_insns, + &succ_deps->pending_read_mems); + concat_insn_mem_list (pred_deps->pending_write_insns, + pred_deps->pending_write_mems, + &succ_deps->pending_write_insns, + &succ_deps->pending_write_mems); + + succ_deps->last_pending_memory_flush + = concat_INSN_LIST (pred_deps->last_pending_memory_flush, + succ_deps->last_pending_memory_flush); + + succ_deps->pending_lists_length += pred_deps->pending_lists_length; + succ_deps->pending_flush_length += pred_deps->pending_flush_length; + + /* last_function_call is inherited by bb_succ. */ + succ_deps->last_function_call + = concat_INSN_LIST (pred_deps->last_function_call, + succ_deps->last_function_call); + + /* sched_before_next_call is inherited by bb_succ. */ + succ_deps->sched_before_next_call + = concat_INSN_LIST (pred_deps->sched_before_next_call, + succ_deps->sched_before_next_call); + + e = NEXT_OUT (e); + } + while (e != first_edge); - /* last_function_call is inherited by bb_succ. */ - for (u = tmp_deps->last_function_call; u; u = XEXP (u, 1)) - if (! find_insn_list (XEXP (u, 0), succ_deps->last_function_call)) - succ_deps->last_function_call - = alloc_INSN_LIST (XEXP (u, 0), succ_deps->last_function_call); - - /* last_pending_memory_flush is inherited by bb_succ. */ - for (u = tmp_deps->last_pending_memory_flush; u; u = XEXP (u, 1)) - if (! find_insn_list (XEXP (u, 0), - succ_deps->last_pending_memory_flush)) - succ_deps->last_pending_memory_flush - = alloc_INSN_LIST (XEXP (u, 0), - succ_deps->last_pending_memory_flush); - - /* sched_before_next_call is inherited by bb_succ. */ - x = LOG_LINKS (tmp_deps->sched_before_next_call); - for (; x; x = XEXP (x, 1)) - add_dependence (succ_deps->sched_before_next_call, - XEXP (x, 0), REG_DEP_ANTI); - - e = NEXT_OUT (e); - } - while (e != first_edge); + /* These lists should point to the right place, for correct + freeing later. 
*/ + bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns; + bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems; + bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns; + bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems; + + /* Can't allow these to be freed twice. */ + pred_deps->pending_read_insns = 0; + pred_deps->pending_read_mems = 0; + pred_deps->pending_write_insns = 0; + pred_deps->pending_write_mems = 0; } /* Compute backward dependences inside bb. In a multiple blocks region: |