Diffstat (limited to 'gcc/cfgcleanup.c')
-rw-r--r--  gcc/cfgcleanup.c  202
1 file changed, 106 insertions, 96 deletions
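
This patch is one piece of the wider conversion of GCC's CFG from singly linked edge lists (bb->succ / e->succ_next and bb->pred / e->pred_next) to edge vectors accessed through EDGE_COUNT, EDGE_SUCC, EDGE_PRED, FOR_EACH_EDGE and the edge_iterator type. As a rough mental model of the two representations (simplified, illustrative field layout only, not the exact basic-block.h declarations of either era):

    /* Old representation: each block heads two singly linked lists of edges,
       so "how many successors?" means chasing succ_next pointers.  */
    struct basic_block_def_old
    {
      edge succ;          /* first successor edge, then e->succ_next, ...   */
      edge pred;          /* first predecessor edge, then e->pred_next, ... */
    };

    /* New representation: each block carries two growable edge vectors,
       so counts and indexed access are direct.  */
    struct basic_block_def_new
    {
      VEC(edge) *succs;   /* EDGE_COUNT (bb->succs), EDGE_SUCC (bb, i)  */
      VEC(edge) *preds;   /* EDGE_COUNT (bb->preds), EDGE_PRED (bb, i)  */
    };
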
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index f9d0607..eccaab4 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -124,9 +124,7 @@ try_simplify_condjump (basic_block cbranch_block)
rtx cbranch_insn;
/* Verify that there are exactly two successors. */
- if (!cbranch_block->succ
- || !cbranch_block->succ->succ_next
- || cbranch_block->succ->succ_next->succ_next)
+ if (EDGE_COUNT (cbranch_block->succs) != 2)
return false;
/* Verify that we've got a normal conditional branch at the end
@@ -142,11 +140,11 @@ try_simplify_condjump (basic_block cbranch_block)
be the last block in the function, and must contain just the
unconditional jump. */
jump_block = cbranch_fallthru_edge->dest;
- if (jump_block->pred->pred_next
+ if (EDGE_COUNT (jump_block->preds) >= 2
|| jump_block->next_bb == EXIT_BLOCK_PTR
|| !FORWARDER_BLOCK_P (jump_block))
return false;
- jump_dest_block = jump_block->succ->dest;
+ jump_dest_block = EDGE_SUCC (jump_block, 0)->dest;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
@@ -290,9 +288,9 @@ thread_jump (int mode, edge e, basic_block b)
/* At the moment, we do handle only conditional jumps, but later we may
want to extend this code to tablejumps and others. */
- if (!e->src->succ->succ_next || e->src->succ->succ_next->succ_next)
+ if (EDGE_COUNT (e->src->succs) != 2)
return NULL;
- if (!b->succ || !b->succ->succ_next || b->succ->succ_next->succ_next)
+ if (EDGE_COUNT (b->succs) != 2)
{
BB_SET_FLAG (b, BB_NONTHREADABLE_BLOCK);
return NULL;
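
The hunks above show the simplest and most common rewrite in this patch: a multi-step pointer chase asking "does this block have exactly two successors?" becomes a single count on the edge vector. Side by side, the two idioms look like this (a minimal sketch using only the macros already visible in the patch):

    /* Old style: test for exactly two successors by walking the list.  */
    if (!bb->succ
        || !bb->succ->succ_next
        || bb->succ->succ_next->succ_next)
      return false;

    /* New style: the vector knows its own length.  */
    if (EDGE_COUNT (bb->succs) != 2)
      return false;
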
@@ -421,7 +419,8 @@ static bool
try_forward_edges (int mode, basic_block b)
{
bool changed = false;
- edge e, next, *threaded_edges = NULL;
+ edge_iterator ei;
+ edge e, *threaded_edges = NULL;
/* If we are partitioning hot/cold basic blocks, we don't want to
mess up unconditional or indirect jumps that cross between hot
@@ -437,7 +436,7 @@ try_forward_edges (int mode, basic_block b)
&& find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX))
return false;
- for (e = b->succ; e; e = next)
+ for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
{
basic_block target, first;
int counter;
@@ -445,15 +444,16 @@ try_forward_edges (int mode, basic_block b)
int nthreaded_edges = 0;
bool may_thread = first_pass | (b->flags & BB_DIRTY);
- next = e->succ_next;
-
/* Skip complex edges because we don't know how to update them.
Still handle fallthru edges, as we can succeed to forward fallthru
edge to the same place as the branch edge of conditional branch
and turn conditional branch to an unconditional branch. */
if (e->flags & EDGE_COMPLEX)
- continue;
+ {
+ ei_next (&ei);
+ continue;
+ }
target = first = e->dest;
counter = 0;
@@ -480,13 +480,13 @@ try_forward_edges (int mode, basic_block b)
may_thread |= target->flags & BB_DIRTY;
if (FORWARDER_BLOCK_P (target)
- && !(target->succ->flags & EDGE_CROSSING)
- && target->succ->dest != EXIT_BLOCK_PTR)
+ && !(EDGE_SUCC (target, 0)->flags & EDGE_CROSSING)
+ && EDGE_SUCC (target, 0)->dest != EXIT_BLOCK_PTR)
{
/* Bypass trivial infinite loops. */
- if (target == target->succ->dest)
+ if (target == EDGE_SUCC (target, 0)->dest)
counter = n_basic_blocks;
- new_target = target->succ->dest;
+ new_target = EDGE_SUCC (target, 0)->dest;
}
/* Allow to thread only over one edge at time to simplify updating
@@ -538,7 +538,7 @@ try_forward_edges (int mode, basic_block b)
it must appear before the JUMP_INSN. */
if ((mode & CLEANUP_PRE_LOOP) && optimize)
{
- rtx insn = (target->succ->flags & EDGE_FALLTHRU
+ rtx insn = (EDGE_SUCC (target, 0)->flags & EDGE_FALLTHRU
? BB_HEAD (target) : prev_nonnote_insn (BB_END (target)));
if (!NOTE_P (insn))
@@ -597,6 +597,7 @@ try_forward_edges (int mode, basic_block b)
fprintf (dump_file,
"Forwarding edge %i->%i to %i failed.\n",
b->index, e->dest->index, target->index);
+ ei_next (&ei);
continue;
}
@@ -614,7 +615,7 @@ try_forward_edges (int mode, basic_block b)
{
edge t;
- if (first->succ->succ_next)
+ if (EDGE_COUNT (first->succs) > 1)
{
gcc_assert (n < nthreaded_edges);
t = threaded_edges [n++];
@@ -638,7 +639,7 @@ try_forward_edges (int mode, basic_block b)
if (n < nthreaded_edges
&& first == threaded_edges [n]->src)
n++;
- t = first->succ;
+ t = EDGE_SUCC (first, 0);
}
t->count -= edge_count;
@@ -649,7 +650,9 @@ try_forward_edges (int mode, basic_block b)
while (first != target);
changed = true;
+ continue;
}
+ ei_next (&ei);
}
if (threaded_edges)
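
The rewritten loop in try_forward_edges is the pattern this patch uses when the edge vector can change under the walk: an explicit edge_iterator, ei_safe_edge as the loop condition, and ei_next called only on the paths that leave the current edge in place. After a successful forwarding the code continues without advancing, presumably because the redirection can remove or coalesce entries in b->succs, so the same index is re-examined. A condensed sketch of that control shape, with the forwarder-chasing logic elided:

    edge e;
    edge_iterator ei;
    basic_block target;

    for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); )
      {
        if (e->flags & EDGE_COMPLEX)
          {
            ei_next (&ei);        /* skip this edge but keep it: advance  */
            continue;
          }

        /* ... follow forwarder blocks from e->dest to compute TARGET ...  */

        if (redirect_edge_and_branch (e, target))
          continue;               /* b->succs may have shrunk or shifted;
                                     re-examine whatever holds this index  */

        ei_next (&ei);            /* forwarding failed or was not attempted  */
      }
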
@@ -837,6 +840,7 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
edge tmp_edge, b_fallthru_edge;
bool c_has_outgoing_fallthru;
bool b_has_incoming_fallthru;
+ edge_iterator ei;
/* Avoid overactive code motion, as the forwarder blocks should be
eliminated by edge redirection instead. One exception might have
@@ -849,13 +853,13 @@ merge_blocks_move (edge e, basic_block b, basic_block c, int mode)
and loop notes. This is done by squeezing out all the notes
and leaving them there to lie. Not ideal, but functional. */
- for (tmp_edge = c->succ; tmp_edge; tmp_edge = tmp_edge->succ_next)
+ FOR_EACH_EDGE (tmp_edge, ei, c->succs)
if (tmp_edge->flags & EDGE_FALLTHRU)
break;
c_has_outgoing_fallthru = (tmp_edge != NULL);
- for (tmp_edge = b->pred; tmp_edge; tmp_edge = tmp_edge->pred_next)
+ FOR_EACH_EDGE (tmp_edge, ei, b->preds)
if (tmp_edge->flags & EDGE_FALLTHRU)
break;
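
Where the loop body only inspects edges, the patch uses FOR_EACH_EDGE with an edge_iterator, as in merge_blocks_move above. The read-only scan idiom, in isolation:

    edge tmp_edge;
    edge_iterator ei;
    bool has_fallthru = false;

    /* Look for a fallthru edge among C's successors; the vector is not
       modified here, so FOR_EACH_EDGE is safe.  */
    FOR_EACH_EDGE (tmp_edge, ei, c->succs)
      if (tmp_edge->flags & EDGE_FALLTHRU)
        {
          has_fallthru = true;
          break;
        }
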
@@ -1214,21 +1218,20 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
int nehedges1 = 0, nehedges2 = 0;
edge fallthru1 = 0, fallthru2 = 0;
edge e1, e2;
+ edge_iterator ei;
/* If BB1 has only one successor, we may be looking at either an
unconditional jump, or a fake edge to exit. */
- if (bb1->succ && !bb1->succ->succ_next
- && (bb1->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
+ if (EDGE_COUNT (bb1->succs) == 1
+ && (EDGE_SUCC (bb1, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
&& (!JUMP_P (BB_END (bb1)) || simplejump_p (BB_END (bb1))))
- return (bb2->succ && !bb2->succ->succ_next
- && (bb2->succ->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
+ return (EDGE_COUNT (bb2->succs) == 1
+ && (EDGE_SUCC (bb2, 0)->flags & (EDGE_COMPLEX | EDGE_FAKE)) == 0
&& (!JUMP_P (BB_END (bb2)) || simplejump_p (BB_END (bb2))));
/* Match conditional jumps - this may get tricky when fallthru and branch
edges are crossed. */
- if (bb1->succ
- && bb1->succ->succ_next
- && !bb1->succ->succ_next->succ_next
+ if (EDGE_COUNT (bb1->succs) == 2
&& any_condjump_p (BB_END (bb1))
&& onlyjump_p (BB_END (bb1)))
{
@@ -1237,9 +1240,7 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
rtx set1, set2, cond1, cond2;
enum rtx_code code1, code2;
- if (!bb2->succ
- || !bb2->succ->succ_next
- || bb2->succ->succ_next->succ_next
+ if (EDGE_COUNT (bb2->succs) != 2
|| !any_condjump_p (BB_END (bb2))
|| !onlyjump_p (BB_END (bb2)))
return false;
@@ -1252,10 +1253,10 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
/* Get around possible forwarders on fallthru edges. Other cases
should be optimized out already. */
if (FORWARDER_BLOCK_P (f1->dest))
- f1 = f1->dest->succ;
+ f1 = EDGE_SUCC (f1->dest, 0);
if (FORWARDER_BLOCK_P (f2->dest))
- f2 = f2->dest->succ;
+ f2 = EDGE_SUCC (f2->dest, 0);
/* To simplify use of this function, return false if there are
unneeded forwarder blocks. These will get eliminated later
@@ -1425,9 +1426,13 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
/* Search the outgoing edges, ensure that the counts do match, find possible
fallthru and exception handling edges since these needs more
validation. */
- for (e1 = bb1->succ, e2 = bb2->succ; e1 && e2;
- e1 = e1->succ_next, e2 = e2->succ_next)
+ if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
+ return false;
+
+ FOR_EACH_EDGE (e1, ei, bb1->succs)
{
+ e2 = EDGE_SUCC (bb2, ei.index);
+
if (e1->flags & EDGE_EH)
nehedges1++;
@@ -1441,8 +1446,7 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
}
/* If number of edges of various types does not match, fail. */
- if (e1 || e2
- || nehedges1 != nehedges2
+ if (nehedges1 != nehedges2
|| (fallthru1 != 0) != (fallthru2 != 0))
return false;
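
In outgoing_edges_match the old code walked both successor lists in lockstep and only noticed a count mismatch when one list ran out before the other (the leftover e1 || e2 test above); the new code compares EDGE_COUNT up front and then pairs edges by position, with ei.index giving the position of the current edge in bb1 and EDGE_SUCC fetching the corresponding edge of bb2. Roughly:

    edge e1, e2;
    edge_iterator ei;

    /* The old lockstep walk implied this check; now it is explicit.  */
    if (EDGE_COUNT (bb1->succs) != EDGE_COUNT (bb2->succs))
      return false;

    FOR_EACH_EDGE (e1, ei, bb1->succs)
      {
        e2 = EDGE_SUCC (bb2, ei.index);   /* edge at the same position in bb2  */
        /* ... classify e1 and e2 (EH edges, fallthru edges) ...  */
      }
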
@@ -1450,9 +1454,9 @@ outgoing_edges_match (int mode, basic_block bb1, basic_block bb2)
if (fallthru1)
{
basic_block d1 = (forwarder_block_p (fallthru1->dest)
- ? fallthru1->dest->succ->dest: fallthru1->dest);
+ ? EDGE_SUCC (fallthru1->dest, 0)->dest: fallthru1->dest);
basic_block d2 = (forwarder_block_p (fallthru2->dest)
- ? fallthru2->dest->succ->dest: fallthru2->dest);
+ ? EDGE_SUCC (fallthru2->dest, 0)->dest: fallthru2->dest);
if (d1 != d2)
return false;
@@ -1487,6 +1491,7 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
basic_block redirect_to, redirect_from, to_remove;
rtx newpos1, newpos2;
edge s;
+ edge_iterator ei;
newpos1 = newpos2 = NULL_RTX;
@@ -1506,15 +1511,13 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
about multiple entry or chained forwarders, as they will be optimized
away. We do this to look past the unconditional jump following a
conditional jump that is required due to the current CFG shape. */
- if (src1->pred
- && !src1->pred->pred_next
+ if (EDGE_COUNT (src1->preds) == 1
&& FORWARDER_BLOCK_P (src1))
- e1 = src1->pred, src1 = e1->src;
+ e1 = EDGE_PRED (src1, 0), src1 = e1->src;
- if (src2->pred
- && !src2->pred->pred_next
+ if (EDGE_COUNT (src2->preds) == 1
&& FORWARDER_BLOCK_P (src2))
- e2 = src2->pred, src2 = e2->src;
+ e2 = EDGE_PRED (src2, 0), src2 = e2->src;
/* Nothing to do if we reach ENTRY, or a common source block. */
if (src1 == ENTRY_BLOCK_PTR || src2 == ENTRY_BLOCK_PTR)
@@ -1524,16 +1527,16 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
/* Seeing more than 1 forwarder blocks would confuse us later... */
if (FORWARDER_BLOCK_P (e1->dest)
- && FORWARDER_BLOCK_P (e1->dest->succ->dest))
+ && FORWARDER_BLOCK_P (EDGE_SUCC (e1->dest, 0)->dest))
return false;
if (FORWARDER_BLOCK_P (e2->dest)
- && FORWARDER_BLOCK_P (e2->dest->succ->dest))
+ && FORWARDER_BLOCK_P (EDGE_SUCC (e2->dest, 0)->dest))
return false;
/* Likewise with dead code (possibly newly created by the other optimizations
of cfg_cleanup). */
- if (!src1->pred || !src2->pred)
+ if (EDGE_COUNT (src1->preds) == 0 || EDGE_COUNT (src2->preds) == 0)
return false;
/* Look for the common insn sequence, part the first ... */
@@ -1606,19 +1609,20 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
redirect_to->flags |= BB_DIRTY;
/* Recompute the frequencies and counts of outgoing edges. */
- for (s = redirect_to->succ; s; s = s->succ_next)
+ FOR_EACH_EDGE (s, ei, redirect_to->succs)
{
edge s2;
+ edge_iterator ei;
basic_block d = s->dest;
if (FORWARDER_BLOCK_P (d))
- d = d->succ->dest;
+ d = EDGE_SUCC (d, 0)->dest;
- for (s2 = src1->succ; ; s2 = s2->succ_next)
+ FOR_EACH_EDGE (s2, ei, src1->succs)
{
basic_block d2 = s2->dest;
if (FORWARDER_BLOCK_P (d2))
- d2 = d2->succ->dest;
+ d2 = EDGE_SUCC (d2, 0)->dest;
if (d == d2)
break;
}
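
Nested walks each get their own iterator: the outer FOR_EACH_EDGE above uses the function-level ei added earlier in this hunk series, while the inner walk over src1->succs declares a second edge_iterator in the inner scope (the patch reuses the name ei, shadowing the outer one), so the two traversals do not interfere. In outline, with a stand-in comparison for the forwarder-aware matching done by the real code:

    edge s, s2;
    edge_iterator ei;

    FOR_EACH_EDGE (s, ei, redirect_to->succs)
      {
        edge_iterator ei2;                /* inner iterator; the patch simply
                                             shadows the outer ei instead  */
        FOR_EACH_EDGE (s2, ei2, src1->succs)
          if (s->dest == s2->dest)        /* stand-in for the forwarder-aware
                                             destination comparison  */
            break;
      }
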
@@ -1630,16 +1634,16 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
into infinite loop. */
if (FORWARDER_BLOCK_P (s->dest))
{
- s->dest->succ->count += s2->count;
+ EDGE_SUCC (s->dest, 0)->count += s2->count;
s->dest->count += s2->count;
s->dest->frequency += EDGE_FREQUENCY (s);
}
if (FORWARDER_BLOCK_P (s2->dest))
{
- s2->dest->succ->count -= s2->count;
- if (s2->dest->succ->count < 0)
- s2->dest->succ->count = 0;
+ EDGE_SUCC (s2->dest, 0)->count -= s2->count;
+ if (EDGE_SUCC (s2->dest, 0)->count < 0)
+ EDGE_SUCC (s2->dest, 0)->count = 0;
s2->dest->count -= s2->count;
s2->dest->frequency -= EDGE_FREQUENCY (s);
if (s2->dest->frequency < 0)
@@ -1669,9 +1673,9 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
newpos1 = NEXT_INSN (newpos1);
redirect_from = split_block (src1, PREV_INSN (newpos1))->src;
- to_remove = redirect_from->succ->dest;
+ to_remove = EDGE_SUCC (redirect_from, 0)->dest;
- redirect_edge_and_branch_force (redirect_from->succ, redirect_to);
+ redirect_edge_and_branch_force (EDGE_SUCC (redirect_from, 0), redirect_to);
delete_basic_block (to_remove);
update_forwarder_flag (redirect_from);
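
Wherever the old code reached through bb->succ or bb->pred to the single edge of a forwarder block, the new code indexes the vector directly with EDGE_SUCC (bb, 0) or EDGE_PRED (bb, 0); since a forwarder block has exactly one successor by definition, the substitution is one for one. The recurring pattern, shown for an outgoing edge s as in the hunk above:

    /* Look through a forwarder block to the block it forwards to.  */
    basic_block d = s->dest;
    if (FORWARDER_BLOCK_P (d))
      d = EDGE_SUCC (d, 0)->dest;         /* was: d = d->succ->dest;  */
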
@@ -1686,12 +1690,14 @@ try_crossjump_to_edge (int mode, edge e1, edge e2)
static bool
try_crossjump_bb (int mode, basic_block bb)
{
- edge e, e2, nexte2, nexte, fallthru;
+ edge e, e2, fallthru;
bool changed;
- int n = 0, max;
+ unsigned max, ix, ix2;
+ basic_block ev, ev2;
+ edge_iterator ei;
/* Nothing to do if there is not at least two incoming edges. */
- if (!bb->pred || !bb->pred->pred_next)
+ if (EDGE_COUNT (bb->preds) < 2)
return false;
/* If we are partitioning hot/cold basic blocks, we don't want to
@@ -1705,8 +1711,8 @@ try_crossjump_bb (int mode, basic_block bb)
bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
if (flag_reorder_blocks_and_partition
- && (BB_PARTITION (bb->pred->src) != BB_PARTITION (bb->pred->pred_next->src)
- || (bb->pred->flags & EDGE_CROSSING)))
+ && (BB_PARTITION (EDGE_PRED (bb, 0)->src) != BB_PARTITION (EDGE_PRED (bb, 1)->src)
+ || (EDGE_PRED (bb, 0)->flags & EDGE_CROSSING)))
return false;
/* It is always cheapest to redirect a block that ends in a branch to
@@ -1714,18 +1720,21 @@ try_crossjump_bb (int mode, basic_block bb)
program. We'll try that combination first. */
fallthru = NULL;
max = PARAM_VALUE (PARAM_MAX_CROSSJUMP_EDGES);
- for (e = bb->pred; e ; e = e->pred_next, n++)
+
+ if (EDGE_COUNT (bb->preds) > max)
+ return false;
+
+ FOR_EACH_EDGE (e, ei, bb->preds)
{
if (e->flags & EDGE_FALLTHRU)
- fallthru = e;
- if (n > max)
- return false;
+ fallthru = e;
}
changed = false;
- for (e = bb->pred; e; e = nexte)
+ for (ix = 0, ev = bb; ix < EDGE_COUNT (ev->preds); )
{
- nexte = e->pred_next;
+ e = EDGE_PRED (ev, ix);
+ ix++;
/* As noted above, first try with the fallthru predecessor. */
if (fallthru)
@@ -1744,7 +1753,8 @@ try_crossjump_bb (int mode, basic_block bb)
if (try_crossjump_to_edge (mode, e, fallthru))
{
changed = true;
- nexte = bb->pred;
+ ix = 0;
+ ev = bb;
continue;
}
}
@@ -1761,12 +1771,13 @@ try_crossjump_bb (int mode, basic_block bb)
can eliminate redundant checks of crossjump(A,B) by arbitrarily
choosing to do the check from the block for which the edge
in question is the first successor of A. */
- if (e->src->succ != e)
+ if (EDGE_SUCC (e->src, 0) != e)
continue;
- for (e2 = bb->pred; e2; e2 = nexte2)
+ for (ix2 = 0, ev2 = bb; ix2 < EDGE_COUNT (ev2->preds); )
{
- nexte2 = e2->pred_next;
+ e2 = EDGE_PRED (ev2, ix2);
+ ix2++;
if (e2 == e)
continue;
@@ -1792,7 +1803,8 @@ try_crossjump_bb (int mode, basic_block bb)
if (try_crossjump_to_edge (mode, e, e2))
{
changed = true;
- nexte = bb->pred;
+ ev2 = bb;
+ ix = 0;
break;
}
}
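
try_crossjump_bb removes and redirects predecessor edges while it scans them, so instead of FOR_EACH_EDGE it walks bb->preds by index and resets the index to zero after a successful crossjump, when the vector contents are stale. Stripped of the crossjump logic itself (mode and fallthru are as in the surrounding function), the control shape is roughly:

    edge e;
    unsigned ix;
    basic_block ev = bb;
    bool changed = false;

    for (ix = 0; ix < EDGE_COUNT (ev->preds); )
      {
        e = EDGE_PRED (ev, ix);
        ix++;

        /* ... try the fallthru pairing, then the other predecessors ...  */
        if (try_crossjump_to_edge (mode, e, fallthru))
          {
            changed = true;
            ix = 0;             /* the predecessor vector just changed:
                                   rescan bb->preds from the start  */
            ev = bb;
            continue;
          }
      }
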
@@ -1844,7 +1856,7 @@ try_optimize_cfg (int mode)
bool changed_here = false;
/* Delete trivially dead basic blocks. */
- while (b->pred == NULL)
+ while (EDGE_COUNT (b->preds) == 0)
{
c = b->prev_bb;
if (dump_file)
@@ -1858,9 +1870,9 @@ try_optimize_cfg (int mode)
}
/* Remove code labels no longer used. */
- if (b->pred->pred_next == NULL
- && (b->pred->flags & EDGE_FALLTHRU)
- && !(b->pred->flags & EDGE_COMPLEX)
+ if (EDGE_COUNT (b->preds) == 1
+ && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
+ && !(EDGE_PRED (b, 0)->flags & EDGE_COMPLEX)
&& LABEL_P (BB_HEAD (b))
/* If the previous block ends with a branch to this
block, we can't delete the label. Normally this
@@ -1868,10 +1880,10 @@ try_optimize_cfg (int mode)
if CASE_DROPS_THRU, this can be a tablejump with
some element going to the same place as the
default (fallthru). */
- && (b->pred->src == ENTRY_BLOCK_PTR
- || !JUMP_P (BB_END (b->pred->src))
+ && (EDGE_PRED (b, 0)->src == ENTRY_BLOCK_PTR
+ || !JUMP_P (BB_END (EDGE_PRED (b, 0)->src))
|| ! label_is_jump_target_p (BB_HEAD (b),
- BB_END (b->pred->src))))
+ BB_END (EDGE_PRED (b, 0)->src))))
{
rtx label = BB_HEAD (b);
@@ -1892,13 +1904,13 @@ try_optimize_cfg (int mode)
/* If we fall through an empty block, we can remove it. */
if (!(mode & CLEANUP_CFGLAYOUT)
- && b->pred->pred_next == NULL
- && (b->pred->flags & EDGE_FALLTHRU)
+ && EDGE_COUNT (b->preds) == 1
+ && (EDGE_PRED (b, 0)->flags & EDGE_FALLTHRU)
&& !LABEL_P (BB_HEAD (b))
&& FORWARDER_BLOCK_P (b)
/* Note that forwarder_block_p true ensures that
there is a successor for this block. */
- && (b->succ->flags & EDGE_FALLTHRU)
+ && (EDGE_SUCC (b, 0)->flags & EDGE_FALLTHRU)
&& n_basic_blocks > 1)
{
if (dump_file)
@@ -1907,17 +1919,17 @@ try_optimize_cfg (int mode)
b->index);
c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
- redirect_edge_succ_nodup (b->pred, b->succ->dest);
+ redirect_edge_succ_nodup (EDGE_PRED (b, 0), EDGE_SUCC (b, 0)->dest);
delete_basic_block (b);
changed = true;
b = c;
}
- if ((s = b->succ) != NULL
- && s->succ_next == NULL
+ if (EDGE_COUNT (b->succs) == 1
+ && (s = EDGE_SUCC (b, 0))
&& !(s->flags & EDGE_COMPLEX)
&& (c = s->dest) != EXIT_BLOCK_PTR
- && c->pred->pred_next == NULL
+ && EDGE_COUNT (c->preds) == 1
&& b != c)
{
/* When not in cfg_layout mode use code aware of reordering
@@ -1959,12 +1971,11 @@ try_optimize_cfg (int mode)
non-trivial jump instruction without side-effects, we
can either delete the jump entirely, or replace it
with a simple unconditional jump. */
- if (b->succ
- && ! b->succ->succ_next
- && b->succ->dest != EXIT_BLOCK_PTR
+ if (EDGE_COUNT (b->succs) == 1
+ && EDGE_SUCC (b, 0)->dest != EXIT_BLOCK_PTR
&& onlyjump_p (BB_END (b))
&& !find_reg_note (BB_END (b), REG_CROSSING_JUMP, NULL_RTX)
- && try_redirect_by_replacing_jump (b->succ, b->succ->dest,
+ && try_redirect_by_replacing_jump (EDGE_SUCC (b, 0), EDGE_SUCC (b, 0)->dest,
(mode & CLEANUP_CFGLAYOUT) != 0))
{
update_forwarder_flag (b);
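
The single-successor tests in try_optimize_cfg and merge_seq_blocks below all reduce to the same shape: check EDGE_COUNT (b->succs) == 1 first, then fetch the edge with EDGE_SUCC (b, 0), either bound to a variable inside the condition (as the block-merging test above does) or used inline. For example:

    edge s;

    if (EDGE_COUNT (b->succs) == 1
        && (s = EDGE_SUCC (b, 0))         /* always non-null here; the
                                             assignment just binds s  */
        && !(s->flags & EDGE_COMPLEX)
        && s->dest != EXIT_BLOCK_PTR)
      {
        /* ... operate on the single outgoing edge s ...  */
      }
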
@@ -2049,12 +2060,11 @@ merge_seq_blocks (void)
for (bb = ENTRY_BLOCK_PTR->next_bb; bb != EXIT_BLOCK_PTR; )
{
- if (bb->succ
- && !bb->succ->succ_next
- && can_merge_blocks_p (bb, bb->succ->dest))
+ if (EDGE_COUNT (bb->succs) == 1
+ && can_merge_blocks_p (bb, EDGE_SUCC (bb, 0)->dest))
{
/* Merge the blocks and retry. */
- merge_blocks (bb, bb->succ->dest);
+ merge_blocks (bb, EDGE_SUCC (bb, 0)->dest);
changed = true;
continue;
}