aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRichard Henderson <rth@redhat.com>2002-05-16 19:31:56 -0700
committerRichard Henderson <rth@gcc.gnu.org>2002-05-16 19:31:56 -0700
commit0b17ab2f5b1184fdb568786f791bc0613e574241 (patch)
tree94c8895c6dde3b282518d4c9951067cd0ac517fd
parent8ae86b3cd8c96e287714f127879b018ac7fccd7d (diff)
downloadgcc-0b17ab2f5b1184fdb568786f791bc0613e574241.zip
gcc-0b17ab2f5b1184fdb568786f791bc0613e574241.tar.gz
gcc-0b17ab2f5b1184fdb568786f791bc0613e574241.tar.bz2
Revert "Basic block renumbering removal", and two followup patches.
From-SVN: r53537
-rw-r--r--gcc/ChangeLog12
-rw-r--r--gcc/basic-block.h34
-rw-r--r--gcc/bb-reorder.c26
-rw-r--r--gcc/cfg.c147
-rw-r--r--gcc/cfganal.c326
-rw-r--r--gcc/cfgbuild.c126
-rw-r--r--gcc/cfgcleanup.c98
-rw-r--r--gcc/cfglayout.c74
-rw-r--r--gcc/cfgloop.c91
-rw-r--r--gcc/cfgrtl.c242
-rw-r--r--gcc/combine.c261
-rw-r--r--gcc/conflict.c5
-rw-r--r--gcc/df.c244
-rw-r--r--gcc/df.h4
-rw-r--r--gcc/dominance.c59
-rw-r--r--gcc/final.c11
-rw-r--r--gcc/flow.c97
-rw-r--r--gcc/function.c2
-rw-r--r--gcc/gcse.c478
-rw-r--r--gcc/global.c35
-rw-r--r--gcc/graph.c25
-rw-r--r--gcc/haifa-sched.c31
-rw-r--r--gcc/ifcvt.c60
-rw-r--r--gcc/lcm.c370
-rw-r--r--gcc/local-alloc.c38
-rw-r--r--gcc/loop.c2
-rw-r--r--gcc/predict.c181
-rw-r--r--gcc/print-rtl.c2
-rw-r--r--gcc/profile.c99
-rw-r--r--gcc/recog.c17
-rw-r--r--gcc/reg-stack.c27
-rw-r--r--gcc/regclass.c13
-rw-r--r--gcc/regmove.c29
-rw-r--r--gcc/regrename.c27
-rw-r--r--gcc/reload1.c19
-rw-r--r--gcc/reorg.c2
-rw-r--r--gcc/resource.c10
-rw-r--r--gcc/sbitmap.c18
-rw-r--r--gcc/sched-deps.c2
-rw-r--r--gcc/sched-ebb.c21
-rw-r--r--gcc/sched-rgn.c161
-rw-r--r--gcc/sibcall.c4
-rw-r--r--gcc/ssa-ccp.c22
-rw-r--r--gcc/ssa-dce.c29
-rw-r--r--gcc/ssa.c120
45 files changed, 1930 insertions, 1771 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 703d303..c3cb9b0 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,15 @@
+2002-05-16 Richard Henderson <rth@redhat.com>
+
+ * basic-block.h, bb-reorder.c, cfg.c, cfganal.c, cfgbuild.c,
+ cfgcleanup.c, cfglayout.c, cfgloop.c, cfgrtl.c, combine.c,
+ conflict.c, df.c, df.h, dominance.c, final.c, flow.c, function.c,
+ gcse.c, global.c, graph.c, haifa-sched.c, ifcvt.c, lcm.c,
+ local-alloc.c, loop.c, predict.c, print-rtl.c, profile.c,
+ recog.c, reg-stack.c, regclass.c, regmove.c, regrename.c,
+ reload1.c, reorg.c, resource.c, sbitmap.c, sched-deps.c,
+ sched-ebb.c, sched-rgn.c, sibcall.c, ssa-ccp.c, ssa-dce.c, ssa.c:
+ Revert "Basic block renumbering removal", and two followup patches.
+
2002-05-16 Jason Thorpe <thorpej@wasabisystems.com>
* lcm.c (optimize_mode_switching): Revert previous change.
diff --git a/gcc/basic-block.h b/gcc/basic-block.h
index 75d97cb..5615b14 100644
--- a/gcc/basic-block.h
+++ b/gcc/basic-block.h
@@ -203,11 +203,8 @@ typedef struct basic_block_def {
/* Auxiliary info specific to a pass. */
void *aux;
- /* The index of a block. */
- int sindex;
-
- /* Previous and next blocks in the chain. */
- struct basic_block_def *prev_bb, *next_bb;
+ /* The index of this block. */
+ int index;
/* The loop depth of this block. */
int loop_depth;
@@ -231,11 +228,7 @@ typedef struct basic_block_def {
/* Number of basic blocks in the current function. */
-extern int num_basic_blocks;
-
-/* First free basic block number. */
-
-extern int last_basic_block;
+extern int n_basic_blocks;
/* Number of edges in the current function. */
@@ -247,16 +240,6 @@ extern varray_type basic_block_info;
#define BASIC_BLOCK(N) (VARRAY_BB (basic_block_info, (N)))
-/* For iterating over basic blocks. */
-#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
- for (BB = FROM; BB != TO; BB = BB->DIR)
-
-#define FOR_ALL_BB(BB) \
- FOR_BB_BETWEEN (BB, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR, next_bb)
-
-#define FOR_ALL_BB_REVERSE(BB) \
- FOR_BB_BETWEEN (BB, EXIT_BLOCK_PTR->prev_bb, ENTRY_BLOCK_PTR, prev_bb)
-
/* What registers are live at the setjmp call. */
extern regset regs_live_at_setjmp;
@@ -301,7 +284,7 @@ extern struct basic_block_def entry_exit_blocks[2];
extern varray_type basic_block_for_insn;
#define BLOCK_FOR_INSN(INSN) VARRAY_BB (basic_block_for_insn, INSN_UID (INSN))
-#define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->sindex + 0)
+#define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->index + 0)
extern void compute_bb_for_insn PARAMS ((int));
extern void free_bb_for_insn PARAMS ((void));
@@ -331,8 +314,8 @@ extern void remove_edge PARAMS ((edge));
extern void redirect_edge_succ PARAMS ((edge, basic_block));
extern edge redirect_edge_succ_nodup PARAMS ((edge, basic_block));
extern void redirect_edge_pred PARAMS ((edge, basic_block));
-extern basic_block create_basic_block_structure PARAMS ((int, rtx, rtx, rtx, basic_block));
-extern basic_block create_basic_block PARAMS ((rtx, rtx, basic_block));
+extern basic_block create_basic_block_structure PARAMS ((int, rtx, rtx, rtx));
+extern basic_block create_basic_block PARAMS ((int, rtx, rtx));
extern int flow_delete_block PARAMS ((basic_block));
extern int flow_delete_block_noexpunge PARAMS ((basic_block));
extern void clear_bb_flags PARAMS ((void));
@@ -656,15 +639,12 @@ extern void reorder_basic_blocks PARAMS ((void));
extern void dump_bb PARAMS ((basic_block, FILE *));
extern void debug_bb PARAMS ((basic_block));
extern void debug_bb_n PARAMS ((int));
-extern basic_block debug_num2bb PARAMS ((int));
extern void dump_regset PARAMS ((regset, FILE *));
extern void debug_regset PARAMS ((regset));
extern void allocate_reg_life_data PARAMS ((void));
extern void allocate_bb_life_data PARAMS ((void));
extern void expunge_block PARAMS ((basic_block));
-extern void link_block PARAMS ((basic_block, basic_block));
-extern void unlink_block PARAMS ((basic_block));
-extern void compact_blocks PARAMS ((void));
+extern void expunge_block_nocompact PARAMS ((basic_block));
extern basic_block alloc_block PARAMS ((void));
extern void find_unreachable_blocks PARAMS ((void));
extern int delete_noop_moves PARAMS ((rtx));
diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index 06212bc..3647ad6 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -102,11 +102,14 @@ static void
make_reorder_chain ()
{
basic_block prev = NULL;
- basic_block next, bb;
+ int nbb_m1 = n_basic_blocks - 1;
+ basic_block next;
/* Loop until we've placed every block. */
do
{
+ int i;
+
next = NULL;
/* Find the next unplaced block. */
@@ -116,13 +119,12 @@ make_reorder_chain ()
remove from the list as we place. The head of that list is
what we're looking for here. */
- FOR_ALL_BB (bb)
- if (! RBI (bb)->visited)
- {
+ for (i = 0; i <= nbb_m1 && !next; ++i)
+ {
+ basic_block bb = BASIC_BLOCK (i);
+ if (! RBI (bb)->visited)
next = bb;
- break;
- }
-
+ }
if (next)
prev = make_reorder_chain_1 (next, prev);
}
@@ -156,13 +158,13 @@ make_reorder_chain_1 (bb, prev)
restart:
RBI (prev)->next = bb;
- if (rtl_dump_file && prev->next_bb != bb)
+ if (rtl_dump_file && prev->index + 1 != bb->index)
fprintf (rtl_dump_file, "Reordering block %d after %d\n",
- bb->sindex, prev->sindex);
+ bb->index, prev->index);
}
else
{
- if (bb->prev_bb != ENTRY_BLOCK_PTR)
+ if (bb->index != 0)
abort ();
}
RBI (bb)->visited = 1;
@@ -212,7 +214,7 @@ make_reorder_chain_1 (bb, prev)
if (! next)
{
for (e = bb->succ; e ; e = e->succ_next)
- if (e->dest == bb->next_bb)
+ if (e->dest->index == bb->index + 1)
{
if ((e->flags & EDGE_FALLTHRU)
|| (e->dest->succ
@@ -256,7 +258,7 @@ make_reorder_chain_1 (bb, prev)
void
reorder_basic_blocks ()
{
- if (num_basic_blocks <= 1)
+ if (n_basic_blocks <= 1)
return;
if ((* targetm.cannot_modify_jumps_p) ())
diff --git a/gcc/cfg.c b/gcc/cfg.c
index 0300484..47dfb23 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -63,10 +63,7 @@ static char *flow_firstobj;
/* Number of basic blocks in the current function. */
-int num_basic_blocks;
-
-/* First free basic block number. */
-int last_basic_block;
+int n_basic_blocks;
/* Number of edges in the current function. */
@@ -96,8 +93,6 @@ struct basic_block_def entry_exit_blocks[2]
NULL, /* global_live_at_end */
NULL, /* aux */
ENTRY_BLOCK, /* index */
- NULL, /* prev_bb */
- EXIT_BLOCK_PTR, /* next_bb */
0, /* loop_depth */
0, /* count */
0, /* frequency */
@@ -116,8 +111,6 @@ struct basic_block_def entry_exit_blocks[2]
NULL, /* global_live_at_end */
NULL, /* aux */
EXIT_BLOCK, /* index */
- ENTRY_BLOCK_PTR, /* prev_bb */
- NULL, /* next_bb */
0, /* loop_depth */
0, /* count */
0, /* frequency */
@@ -170,11 +163,12 @@ free_edge (e)
void
clear_edges ()
{
- basic_block bb;
+ int i;
edge e;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; ++i)
{
+ basic_block bb = BASIC_BLOCK (i);
edge e = bb->succ;
while (e)
@@ -226,66 +220,36 @@ alloc_block ()
return bb;
}
-/* Link block B to chain after AFTER. */
-void
-link_block (b, after)
- basic_block b, after;
-{
- b->next_bb = after->next_bb;
- b->prev_bb = after;
- after->next_bb = b;
- b->next_bb->prev_bb = b;
-}
+/* Remove block B from the basic block array and compact behind it. */
-/* Unlink block B from chain. */
void
-unlink_block (b)
+expunge_block_nocompact (b)
basic_block b;
{
- b->next_bb->prev_bb = b->prev_bb;
- b->prev_bb->next_bb = b->next_bb;
+ /* Invalidate data to make bughunting easier. */
+ memset (b, 0, sizeof *b);
+ b->index = -3;
+ b->succ = (edge) first_deleted_block;
+ first_deleted_block = (basic_block) b;
}
-/* Sequentially order blocks and compact the arrays. */
void
-compact_blocks ()
+expunge_block (b)
+ basic_block b;
{
- basic_block *bbs = xcalloc (num_basic_blocks, sizeof (basic_block));
- int i;
- basic_block bb;
-
- i = 0;
- FOR_ALL_BB (bb)
- bbs[i++] = bb;
-
- if (i != num_basic_blocks)
- abort ();
+ int i, n = n_basic_blocks;
- for (i = 0; i < num_basic_blocks; i++)
+ for (i = b->index; i + 1 < n; ++i)
{
- bbs[i]->sindex = i;
- BASIC_BLOCK (i) = bbs[i];
+ basic_block x = BASIC_BLOCK (i + 1);
+ BASIC_BLOCK (i) = x;
+ x->index = i;
}
- last_basic_block = num_basic_blocks;
-
- free (bbs);
-}
-/* Remove block B from the basic block array. */
-
-void
-expunge_block (b)
- basic_block b;
-{
- unlink_block (b);
- BASIC_BLOCK (b->sindex) = NULL;
- num_basic_blocks--;
+ n_basic_blocks--;
+ basic_block_info->num_elements--;
- /* Invalidate data to make bughunting easier. */
- memset (b, 0, sizeof *b);
- b->sindex = -3;
- b->succ = (edge) first_deleted_block;
- first_deleted_block = (basic_block) b;
+ expunge_block_nocompact (b);
}
/* Create an edge connecting SRC and DST with FLAGS optionally using
@@ -310,7 +274,7 @@ cached_make_edge (edge_cache, src, dst, flags)
{
default:
/* Quick test for non-existence of the edge. */
- if (! TEST_BIT (edge_cache[src->sindex], dst->sindex))
+ if (! TEST_BIT (edge_cache[src->index], dst->index))
break;
/* The edge exists; early exit if no work to do. */
@@ -350,7 +314,7 @@ cached_make_edge (edge_cache, src, dst, flags)
dst->pred = e;
if (use_edge_cache)
- SET_BIT (edge_cache[src->sindex], dst->sindex);
+ SET_BIT (edge_cache[src->index], dst->index);
return e;
}
@@ -489,10 +453,11 @@ redirect_edge_pred (e, new_pred)
void
clear_bb_flags ()
{
- basic_block bb;
-
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- bb->flags = 0;
+ int i;
+ ENTRY_BLOCK_PTR->flags = 0;
+ EXIT_BLOCK_PTR->flags = 0;
+ for (i = 0; i < n_basic_blocks; i++)
+ BASIC_BLOCK (i)->flags = 0;
}
void
@@ -500,7 +465,6 @@ dump_flow_info (file)
FILE *file;
{
int i;
- basic_block bb;
static const char * const reg_class_names[] = REG_CLASS_NAMES;
fprintf (file, "%d registers.\n", max_regno);
@@ -547,17 +511,16 @@ dump_flow_info (file)
fprintf (file, ".\n");
}
- fprintf (file, "\n%d basic blocks, %d edges.\n", num_basic_blocks, n_edges);
- FOR_ALL_BB (bb)
+ fprintf (file, "\n%d basic blocks, %d edges.\n", n_basic_blocks, n_edges);
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
edge e;
int sum;
gcov_type lsum;
fprintf (file, "\nBasic block %d: first insn %d, last %d, ",
- bb->sindex, INSN_UID (bb->head), INSN_UID (bb->end));
- fprintf (file, "prev %d, next %d, ",
- bb->prev_bb->sindex, bb->next_bb->sindex);
+ i, INSN_UID (bb->head), INSN_UID (bb->end));
fprintf (file, "loop_depth %d, count ", bb->loop_depth);
fprintf (file, HOST_WIDEST_INT_PRINT_DEC, bb->count);
fprintf (file, ", freq %i.\n", bb->frequency);
@@ -632,7 +595,7 @@ dump_edge_info (file, e, do_succ)
else if (side == EXIT_BLOCK_PTR)
fputs (" EXIT", file);
else
- fprintf (file, " %d", side->sindex);
+ fprintf (file, " %d", side->index);
if (e->probability)
fprintf (file, " [%.1f%%] ", e->probability * 100.0 / REG_BR_PROB_BASE);
@@ -712,10 +675,10 @@ alloc_aux_for_blocks (size)
first_block_aux_obj = (char *) obstack_alloc (&block_aux_obstack, 0);
if (size)
{
- basic_block bb;
+ int i;
- FOR_ALL_BB (bb)
- alloc_aux_for_block (bb, size);
+ for (i = 0; i < n_basic_blocks; i++)
+ alloc_aux_for_block (BASIC_BLOCK (i), size);
alloc_aux_for_block (ENTRY_BLOCK_PTR, size);
alloc_aux_for_block (EXIT_BLOCK_PTR, size);
@@ -727,10 +690,13 @@ alloc_aux_for_blocks (size)
void
clear_aux_for_blocks ()
{
- basic_block bb;
+ int i;
+
+ for (i = 0; i < n_basic_blocks; i++)
+ BASIC_BLOCK (i)->aux = NULL;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- bb->aux = NULL;
+ ENTRY_BLOCK_PTR->aux = NULL;
+ EXIT_BLOCK_PTR->aux = NULL;
}
/* Free data allocated in block_aux_obstack and clear AUX pointers
@@ -784,12 +750,17 @@ alloc_aux_for_edges (size)
first_edge_aux_obj = (char *) obstack_alloc (&edge_aux_obstack, 0);
if (size)
{
- basic_block bb;
-
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ int i;
+ for (i = -1; i < n_basic_blocks; i++)
{
+ basic_block bb;
edge e;
+ if (i >= 0)
+ bb = BASIC_BLOCK (i);
+ else
+ bb = ENTRY_BLOCK_PTR;
+
for (e = bb->succ; e; e = e->succ_next)
alloc_aux_for_edge (e, size);
}
@@ -801,12 +772,18 @@ alloc_aux_for_edges (size)
void
clear_aux_for_edges ()
{
- basic_block bb;
+ int i;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ for (i = -1; i < n_basic_blocks; i++)
{
+ basic_block bb;
edge e;
+ if (i >= 0)
+ bb = BASIC_BLOCK (i);
+ else
+ bb = ENTRY_BLOCK_PTR;
+
for (e = bb->succ; e; e = e->succ_next)
e->aux = NULL;
}
@@ -825,11 +802,3 @@ free_aux_for_edges ()
clear_aux_for_edges ();
}
-
-/* The same as BASIC_BLOCK, but usable from debugger. */
-basic_block
-debug_num2bb (num)
- int num;
-{
- return BASIC_BLOCK (num);
-}
diff --git a/gcc/cfganal.c b/gcc/cfganal.c
index 9d6c000..a64124c 100644
--- a/gcc/cfganal.c
+++ b/gcc/cfganal.c
@@ -87,7 +87,7 @@ can_fallthru (src, target)
rtx insn = src->end;
rtx insn2 = target->head;
- if (src->next_bb != target)
+ if (src->index + 1 != target->index)
return 0;
if (!active_insn_p (insn2))
@@ -120,15 +120,15 @@ mark_dfs_back_edges ()
bool found = false;
/* Allocate the preorder and postorder number arrays. */
- pre = (int *) xcalloc (last_basic_block, sizeof (int));
- post = (int *) xcalloc (last_basic_block, sizeof (int));
+ pre = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ post = (int *) xcalloc (n_basic_blocks, sizeof (int));
/* Allocate stack for back-tracking up CFG. */
- stack = (edge *) xmalloc ((num_basic_blocks + 1) * sizeof (edge));
+ stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (n_basic_blocks);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
@@ -149,12 +149,12 @@ mark_dfs_back_edges ()
e->flags &= ~EDGE_DFS_BACK;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->sindex))
+ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
{
/* Mark that we have visited the destination. */
- SET_BIT (visited, dest->sindex);
+ SET_BIT (visited, dest->index);
- pre[dest->sindex] = prenum++;
+ pre[dest->index] = prenum++;
if (dest->succ)
{
/* Since the DEST node has been visited for the first
@@ -162,17 +162,17 @@ mark_dfs_back_edges ()
stack[sp++] = dest->succ;
}
else
- post[dest->sindex] = postnum++;
+ post[dest->index] = postnum++;
}
else
{
if (dest != EXIT_BLOCK_PTR && src != ENTRY_BLOCK_PTR
- && pre[src->sindex] >= pre[dest->sindex]
- && post[dest->sindex] == 0)
+ && pre[src->index] >= pre[dest->index]
+ && post[dest->index] == 0)
e->flags |= EDGE_DFS_BACK, found = true;
if (! e->succ_next && src != ENTRY_BLOCK_PTR)
- post[src->sindex] = postnum++;
+ post[src->index] = postnum++;
if (e->succ_next)
stack[sp - 1] = e->succ_next;
@@ -194,10 +194,10 @@ mark_dfs_back_edges ()
void
set_edge_can_fallthru_flag ()
{
- basic_block bb;
-
- FOR_ALL_BB (bb)
+ int i;
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
edge e;
/* The FALLTHRU edge is also CAN_FALLTHRU edge. */
@@ -258,16 +258,29 @@ flow_call_edges_add (blocks)
{
int i;
int blocks_split = 0;
- int last_bb = last_basic_block;
+ int bb_num = 0;
+ basic_block *bbs;
bool check_last_block = false;
- if (num_basic_blocks == 0)
- return 0;
+ /* Map bb indices into basic block pointers since split_block
+ will renumber the basic blocks. */
+
+ bbs = xmalloc (n_basic_blocks * sizeof (*bbs));
if (! blocks)
- check_last_block = true;
+ {
+ for (i = 0; i < n_basic_blocks; i++)
+ bbs[bb_num++] = BASIC_BLOCK (i);
+
+ check_last_block = true;
+ }
else
- check_last_block = TEST_BIT (blocks, EXIT_BLOCK_PTR->prev_bb->sindex);
+ EXECUTE_IF_SET_IN_SBITMAP (blocks, 0, i,
+ {
+ bbs[bb_num++] = BASIC_BLOCK (i);
+ if (i == n_basic_blocks - 1)
+ check_last_block = true;
+ });
/* In the last basic block, before epilogue generation, there will be
a fallthru edge to EXIT. Special care is required if the last insn
@@ -283,7 +296,7 @@ flow_call_edges_add (blocks)
Handle this by adding a dummy instruction in a new last basic block. */
if (check_last_block)
{
- basic_block bb = EXIT_BLOCK_PTR->prev_bb;
+ basic_block bb = BASIC_BLOCK (n_basic_blocks - 1);
rtx insn = bb->end;
/* Back up past insns that must be kept in the same block as a call. */
@@ -308,18 +321,12 @@ flow_call_edges_add (blocks)
calls since there is no way that we can determine if they will
return or not... */
- for (i = 0; i < last_bb; i++)
+ for (i = 0; i < bb_num; i++)
{
- basic_block bb = BASIC_BLOCK (i);
+ basic_block bb = bbs[i];
rtx insn;
rtx prev_insn;
- if (!bb)
- continue;
-
- if (blocks && !TEST_BIT (blocks, i))
- continue;
-
for (insn = bb->end; ; insn = prev_insn)
{
prev_insn = PREV_INSN (insn);
@@ -367,6 +374,7 @@ flow_call_edges_add (blocks)
if (blocks_split)
verify_flow_info ();
+ free (bbs);
return blocks_split;
}
@@ -378,15 +386,16 @@ void
find_unreachable_blocks ()
{
edge e;
- basic_block *tos, *worklist, bb;
+ int i, n;
+ basic_block *tos, *worklist;
- tos = worklist =
- (basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks);
+ n = n_basic_blocks;
+ tos = worklist = (basic_block *) xmalloc (sizeof (basic_block) * n);
/* Clear all the reachability flags. */
- FOR_ALL_BB (bb)
- bb->flags &= ~BB_REACHABLE;
+ for (i = 0; i < n; ++i)
+ BASIC_BLOCK (i)->flags &= ~BB_REACHABLE;
/* Add our starting points to the worklist. Almost always there will
be only one. It isn't inconceivable that we might one day directly
@@ -436,22 +445,27 @@ create_edge_list ()
struct edge_list *elist;
edge e;
int num_edges;
+ int x;
int block_count;
- basic_block bb;
- block_count = num_basic_blocks + 2; /* Include the entry and exit blocks. */
+ block_count = n_basic_blocks + 2; /* Include the entry and exit blocks. */
num_edges = 0;
/* Determine the number of edges in the flow graph by counting successor
edges on each basic block. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ for (x = 0; x < n_basic_blocks; x++)
{
+ basic_block bb = BASIC_BLOCK (x);
for (e = bb->succ; e; e = e->succ_next)
num_edges++;
}
+ /* Don't forget successors of the entry block. */
+ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
+ num_edges++;
+
elist = (struct edge_list *) xmalloc (sizeof (struct edge_list));
elist->num_blocks = block_count;
elist->num_edges = num_edges;
@@ -459,10 +473,18 @@ create_edge_list ()
num_edges = 0;
- /* Follow successors of blocks, and register these edges. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
- for (e = bb->succ; e; e = e->succ_next)
- elist->index_to_edge[num_edges++] = e;
+ /* Follow successors of the entry block, and register these edges. */
+ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
+ elist->index_to_edge[num_edges++] = e;
+
+ for (x = 0; x < n_basic_blocks; x++)
+ {
+ basic_block bb = BASIC_BLOCK (x);
+
+ /* Follow all successors of blocks, and register these edges. */
+ for (e = bb->succ; e; e = e->succ_next)
+ elist->index_to_edge[num_edges++] = e;
+ }
return elist;
}
@@ -498,12 +520,12 @@ print_edge_list (f, elist)
if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR)
fprintf (f, "entry,");
else
- fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->sindex);
+ fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);
if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR)
fprintf (f, "exit)\n");
else
- fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->sindex);
+ fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
}
}
@@ -516,16 +538,17 @@ verify_edge_list (f, elist)
FILE *f;
struct edge_list *elist;
{
- int index, pred, succ;
+ int x, pred, succ, index;
edge e;
- basic_block bb, p, s;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ for (x = 0; x < n_basic_blocks; x++)
{
+ basic_block bb = BASIC_BLOCK (x);
+
for (e = bb->succ; e; e = e->succ_next)
{
- pred = e->src->sindex;
- succ = e->dest->sindex;
+ pred = e->src->index;
+ succ = e->dest->index;
index = EDGE_INDEX (elist, e->src, e->dest);
if (index == EDGE_INDEX_NO_EDGE)
{
@@ -533,21 +556,42 @@ verify_edge_list (f, elist)
continue;
}
- if (INDEX_EDGE_PRED_BB (elist, index)->sindex != pred)
+ if (INDEX_EDGE_PRED_BB (elist, index)->index != pred)
fprintf (f, "*p* Pred for index %d should be %d not %d\n",
- index, pred, INDEX_EDGE_PRED_BB (elist, index)->sindex);
- if (INDEX_EDGE_SUCC_BB (elist, index)->sindex != succ)
+ index, pred, INDEX_EDGE_PRED_BB (elist, index)->index);
+ if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ)
fprintf (f, "*p* Succ for index %d should be %d not %d\n",
- index, succ, INDEX_EDGE_SUCC_BB (elist, index)->sindex);
+ index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index);
+ }
+ }
+
+ for (e = ENTRY_BLOCK_PTR->succ; e; e = e->succ_next)
+ {
+ pred = e->src->index;
+ succ = e->dest->index;
+ index = EDGE_INDEX (elist, e->src, e->dest);
+ if (index == EDGE_INDEX_NO_EDGE)
+ {
+ fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ);
+ continue;
}
+
+ if (INDEX_EDGE_PRED_BB (elist, index)->index != pred)
+ fprintf (f, "*p* Pred for index %d should be %d not %d\n",
+ index, pred, INDEX_EDGE_PRED_BB (elist, index)->index);
+ if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ)
+ fprintf (f, "*p* Succ for index %d should be %d not %d\n",
+ index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index);
}
/* We've verified that all the edges are in the list, no lets make sure
there are no spurious edges in the list. */
- FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
- FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
+ for (pred = 0; pred < n_basic_blocks; pred++)
+ for (succ = 0; succ < n_basic_blocks; succ++)
{
+ basic_block p = BASIC_BLOCK (pred);
+ basic_block s = BASIC_BLOCK (succ);
int found_edge = 0;
for (e = p->succ; e; e = e->succ_next)
@@ -564,16 +608,78 @@ verify_edge_list (f, elist)
break;
}
- if (EDGE_INDEX (elist, p, s)
+ if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ))
== EDGE_INDEX_NO_EDGE && found_edge != 0)
fprintf (f, "*** Edge (%d, %d) appears to not have an index\n",
- p->sindex, s->sindex);
- if (EDGE_INDEX (elist, p, s)
+ pred, succ);
+ if (EDGE_INDEX (elist, BASIC_BLOCK (pred), BASIC_BLOCK (succ))
!= EDGE_INDEX_NO_EDGE && found_edge == 0)
fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n",
- p->sindex, s->sindex, EDGE_INDEX (elist, p, s));
+ pred, succ, EDGE_INDEX (elist, BASIC_BLOCK (pred),
+ BASIC_BLOCK (succ)));
}
+ for (succ = 0; succ < n_basic_blocks; succ++)
+ {
+ basic_block p = ENTRY_BLOCK_PTR;
+ basic_block s = BASIC_BLOCK (succ);
+ int found_edge = 0;
+
+ for (e = p->succ; e; e = e->succ_next)
+ if (e->dest == s)
+ {
+ found_edge = 1;
+ break;
+ }
+
+ for (e = s->pred; e; e = e->pred_next)
+ if (e->src == p)
+ {
+ found_edge = 1;
+ break;
+ }
+
+ if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ))
+ == EDGE_INDEX_NO_EDGE && found_edge != 0)
+ fprintf (f, "*** Edge (entry, %d) appears to not have an index\n",
+ succ);
+ if (EDGE_INDEX (elist, ENTRY_BLOCK_PTR, BASIC_BLOCK (succ))
+ != EDGE_INDEX_NO_EDGE && found_edge == 0)
+ fprintf (f, "*** Edge (entry, %d) has index %d, but no edge exists\n",
+ succ, EDGE_INDEX (elist, ENTRY_BLOCK_PTR,
+ BASIC_BLOCK (succ)));
+ }
+
+ for (pred = 0; pred < n_basic_blocks; pred++)
+ {
+ basic_block p = BASIC_BLOCK (pred);
+ basic_block s = EXIT_BLOCK_PTR;
+ int found_edge = 0;
+
+ for (e = p->succ; e; e = e->succ_next)
+ if (e->dest == s)
+ {
+ found_edge = 1;
+ break;
+ }
+
+ for (e = s->pred; e; e = e->pred_next)
+ if (e->src == p)
+ {
+ found_edge = 1;
+ break;
+ }
+
+ if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR)
+ == EDGE_INDEX_NO_EDGE && found_edge != 0)
+ fprintf (f, "*** Edge (%d, exit) appears to not have an index\n",
+ pred);
+ if (EDGE_INDEX (elist, BASIC_BLOCK (pred), EXIT_BLOCK_PTR)
+ != EDGE_INDEX_NO_EDGE && found_edge == 0)
+ fprintf (f, "*** Edge (%d, exit) has index %d, but no edge exists\n",
+ pred, EDGE_INDEX (elist, BASIC_BLOCK (pred),
+ EXIT_BLOCK_PTR));
+ }
}
/* This routine will determine what, if any, edge there is between
@@ -628,8 +734,8 @@ flow_edge_list_print (str, edge_list, num_edges, file)
fprintf (file, "%s { ", str);
for (i = 0; i < num_edges; i++)
- fprintf (file, "%d->%d ", edge_list[i]->src->sindex,
- edge_list[i]->dest->sindex);
+ fprintf (file, "%d->%d ", edge_list[i]->src->index,
+ edge_list[i]->dest->index);
fputs ("}\n", file);
}
@@ -662,10 +768,13 @@ remove_fake_successors (bb)
void
remove_fake_edges ()
{
- basic_block bb;
+ int x;
+
+ for (x = 0; x < n_basic_blocks; x++)
+ remove_fake_successors (BASIC_BLOCK (x));
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
- remove_fake_successors (bb);
+ /* We've handled all successors except the entry block's. */
+ remove_fake_successors (ENTRY_BLOCK_PTR);
}
/* This function will add a fake edge between any block which has no
@@ -675,11 +784,11 @@ remove_fake_edges ()
void
add_noreturn_fake_exit_edges ()
{
- basic_block bb;
+ int x;
- FOR_ALL_BB (bb)
- if (bb->succ == NULL)
- make_single_succ_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ for (x = 0; x < n_basic_blocks; x++)
+ if (BASIC_BLOCK (x)->succ == NULL)
+ make_single_succ_edge (BASIC_BLOCK (x), EXIT_BLOCK_PTR, EDGE_FAKE);
}
/* This function adds a fake edge between any infinite loops to the
@@ -731,11 +840,11 @@ flow_reverse_top_sort_order_compute (rts_order)
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
- stack = (edge *) xmalloc ((num_basic_blocks + 1) * sizeof (edge));
+ stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (n_basic_blocks);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
@@ -755,22 +864,22 @@ flow_reverse_top_sort_order_compute (rts_order)
dest = e->dest;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->sindex))
+ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
{
/* Mark that we have visited the destination. */
- SET_BIT (visited, dest->sindex);
+ SET_BIT (visited, dest->index);
if (dest->succ)
/* Since the DEST node has been visited for the first
time, check its successors. */
stack[sp++] = dest->succ;
else
- rts_order[postnum++] = dest->sindex;
+ rts_order[postnum++] = dest->index;
}
else
{
if (! e->succ_next && src != ENTRY_BLOCK_PTR)
- rts_order[postnum++] = src->sindex;
+ rts_order[postnum++] = src->index;
if (e->succ_next)
stack[sp - 1] = e->succ_next;
@@ -798,15 +907,15 @@ flow_depth_first_order_compute (dfs_order, rc_order)
edge *stack;
int sp;
int dfsnum = 0;
- int rcnum = num_basic_blocks - 1;
+ int rcnum = n_basic_blocks - 1;
sbitmap visited;
/* Allocate stack for back-tracking up CFG. */
- stack = (edge *) xmalloc ((num_basic_blocks + 1) * sizeof (edge));
+ stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (n_basic_blocks);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
@@ -826,13 +935,13 @@ flow_depth_first_order_compute (dfs_order, rc_order)
dest = e->dest;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->sindex))
+ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
{
/* Mark that we have visited the destination. */
- SET_BIT (visited, dest->sindex);
+ SET_BIT (visited, dest->index);
if (dfs_order)
- dfs_order[dfsnum] = dest->sindex;
+ dfs_order[dfsnum] = dest->index;
dfsnum++;
@@ -843,7 +952,7 @@ flow_depth_first_order_compute (dfs_order, rc_order)
else if (rc_order)
/* There are no successors for the DEST node so assign
its reverse completion number. */
- rc_order[rcnum--] = dest->sindex;
+ rc_order[rcnum--] = dest->index;
}
else
{
@@ -851,7 +960,7 @@ flow_depth_first_order_compute (dfs_order, rc_order)
&& rc_order)
/* There are no more successors for the SRC node
so assign its reverse completion number. */
- rc_order[rcnum--] = src->sindex;
+ rc_order[rcnum--] = src->index;
if (e->succ_next)
stack[sp - 1] = e->succ_next;
@@ -864,12 +973,12 @@ flow_depth_first_order_compute (dfs_order, rc_order)
sbitmap_free (visited);
/* The number of nodes visited should not be greater than
- num_basic_blocks. */
- if (dfsnum > num_basic_blocks)
+ n_basic_blocks. */
+ if (dfsnum > n_basic_blocks)
abort ();
/* There are some nodes left in the CFG that are unreachable. */
- if (dfsnum < num_basic_blocks)
+ if (dfsnum < n_basic_blocks)
abort ();
return dfsnum;
@@ -905,30 +1014,30 @@ flow_preorder_transversal_compute (pot_order)
sbitmap visited;
struct dfst_node *node;
struct dfst_node *dfst;
- basic_block bb;
/* Allocate stack for back-tracking up CFG. */
- stack = (edge *) xmalloc ((num_basic_blocks + 1) * sizeof (edge));
+ stack = (edge *) xmalloc ((n_basic_blocks + 1) * sizeof (edge));
sp = 0;
/* Allocate the tree. */
- dfst = (struct dfst_node *) xcalloc (last_basic_block,
+ dfst = (struct dfst_node *) xcalloc (n_basic_blocks,
sizeof (struct dfst_node));
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
max_successors = 0;
- for (e = bb->succ; e; e = e->succ_next)
+ for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
max_successors++;
- if (max_successors)
- dfst[bb->sindex].node
- = (struct dfst_node **) xcalloc (max_successors,
- sizeof (struct dfst_node *));
+ dfst[i].node
+ = (max_successors
+ ? (struct dfst_node **) xcalloc (max_successors,
+ sizeof (struct dfst_node *))
+ : NULL);
}
/* Allocate bitmap to track nodes that have been visited. */
- visited = sbitmap_alloc (last_basic_block);
+ visited = sbitmap_alloc (n_basic_blocks);
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (visited);
@@ -947,17 +1056,17 @@ flow_preorder_transversal_compute (pot_order)
dest = e->dest;
/* Check if the edge destination has been visited yet. */
- if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->sindex))
+ if (dest != EXIT_BLOCK_PTR && ! TEST_BIT (visited, dest->index))
{
/* Mark that we have visited the destination. */
- SET_BIT (visited, dest->sindex);
+ SET_BIT (visited, dest->index);
/* Add the destination to the preorder tree. */
if (src != ENTRY_BLOCK_PTR)
{
- dfst[src->sindex].node[dfst[src->sindex].nnodes++]
- = &dfst[dest->sindex];
- dfst[dest->sindex].up = &dfst[src->sindex];
+ dfst[src->index].node[dfst[src->index].nnodes++]
+ = &dfst[dest->index];
+ dfst[dest->index].up = &dfst[src->index];
}
if (dest->succ)
@@ -979,7 +1088,7 @@ flow_preorder_transversal_compute (pot_order)
walking the tree from right to left. */
i = 0;
- node = &dfst[ENTRY_BLOCK_PTR->next_bb->sindex];
+ node = &dfst[0];
pot_order[i++] = 0;
while (node)
@@ -995,7 +1104,7 @@ flow_preorder_transversal_compute (pot_order)
/* Free the tree. */
- for (i = 0; i < last_basic_block; i++)
+ for (i = 0; i < n_basic_blocks; i++)
if (dfst[i].node)
free (dfst[i].node);
@@ -1037,12 +1146,12 @@ flow_dfs_compute_reverse_init (data)
depth_first_search_ds data;
{
/* Allocate stack for back-tracking up CFG. */
- data->stack = (basic_block *) xmalloc ((num_basic_blocks - (INVALID_BLOCK + 1))
+ data->stack = (basic_block *) xmalloc ((n_basic_blocks - (INVALID_BLOCK + 1))
* sizeof (basic_block));
data->sp = 0;
/* Allocate bitmap to track nodes that have been visited. */
- data->visited_blocks = sbitmap_alloc (last_basic_block - (INVALID_BLOCK + 1));
+ data->visited_blocks = sbitmap_alloc (n_basic_blocks - (INVALID_BLOCK + 1));
/* None of the nodes in the CFG have been visited yet. */
sbitmap_zero (data->visited_blocks);
@@ -1060,7 +1169,7 @@ flow_dfs_compute_reverse_add_bb (data, bb)
basic_block bb;
{
data->stack[data->sp++] = bb;
- SET_BIT (data->visited_blocks, bb->sindex - (INVALID_BLOCK + 1));
+ SET_BIT (data->visited_blocks, bb->index - (INVALID_BLOCK + 1));
}
/* Continue the depth-first search through the reverse graph starting with the
@@ -1074,6 +1183,7 @@ flow_dfs_compute_reverse_execute (data)
{
basic_block bb;
edge e;
+ int i;
while (data->sp > 0)
{
@@ -1082,14 +1192,14 @@ flow_dfs_compute_reverse_execute (data)
/* Perform depth-first search on adjacent vertices. */
for (e = bb->pred; e; e = e->pred_next)
if (!TEST_BIT (data->visited_blocks,
- e->src->sindex - (INVALID_BLOCK + 1)))
+ e->src->index - (INVALID_BLOCK + 1)))
flow_dfs_compute_reverse_add_bb (data, e->src);
}
/* Determine if there are unvisited basic blocks. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- if (!TEST_BIT (data->visited_blocks, bb->sindex - (INVALID_BLOCK + 1)))
- return bb;
+ for (i = n_basic_blocks - (INVALID_BLOCK + 1); --i >= 0; )
+ if (!TEST_BIT (data->visited_blocks, i))
+ return BASIC_BLOCK (i + (INVALID_BLOCK + 1));
return NULL;
}
diff --git a/gcc/cfgbuild.c b/gcc/cfgbuild.c
index d531877..5ce9d40 100644
--- a/gcc/cfgbuild.c
+++ b/gcc/cfgbuild.c
@@ -50,8 +50,7 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA
static int count_basic_blocks PARAMS ((rtx));
static void find_basic_blocks_1 PARAMS ((rtx));
static rtx find_label_refs PARAMS ((rtx, rtx));
-static void make_edges PARAMS ((rtx, basic_block,
- basic_block, int));
+static void make_edges PARAMS ((rtx, int, int, int));
static void make_label_edge PARAMS ((sbitmap *, basic_block,
rtx, int));
static void make_eh_edge PARAMS ((sbitmap *, basic_block, rtx));
@@ -281,10 +280,9 @@ make_eh_edge (edge_cache, src, insn)
static void
make_edges (label_value_list, min, max, update_p)
rtx label_value_list;
- basic_block min, max;
- int update_p;
+ int min, max, update_p;
{
- basic_block bb;
+ int i;
sbitmap *edge_cache = NULL;
/* Assume no computed jump; revise as we create edges. */
@@ -295,26 +293,28 @@ make_edges (label_value_list, min, max, update_p)
amount of time searching the edge lists for duplicates. */
if (forced_labels || label_value_list)
{
- edge_cache = sbitmap_vector_alloc (last_basic_block, last_basic_block);
- sbitmap_vector_zero (edge_cache, last_basic_block);
+ edge_cache = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ sbitmap_vector_zero (edge_cache, n_basic_blocks);
if (update_p)
- FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
+ for (i = min; i <= max; ++i)
{
edge e;
- for (e = bb->succ; e ; e = e->succ_next)
+ for (e = BASIC_BLOCK (i)->succ; e ; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
- SET_BIT (edge_cache[bb->sindex], e->dest->sindex);
+ SET_BIT (edge_cache[i], e->dest->index);
}
}
- if (min == ENTRY_BLOCK_PTR->next_bb)
- cached_make_edge (edge_cache, ENTRY_BLOCK_PTR, min,
+ /* By nature of the way these get numbered, block 0 is always the entry. */
+ if (min == 0)
+ cached_make_edge (edge_cache, ENTRY_BLOCK_PTR, BASIC_BLOCK (0),
EDGE_FALLTHRU);
- FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
+ for (i = min; i <= max; ++i)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx insn, x;
enum rtx_code code;
int force_fallthru = 0;
@@ -443,16 +443,15 @@ make_edges (label_value_list, min, max, update_p)
/* Find out if we can drop through to the next block. */
insn = next_nonnote_insn (insn);
-
- if (!insn || (bb->next_bb == EXIT_BLOCK_PTR && force_fallthru))
+ if (!insn || (i + 1 == n_basic_blocks && force_fallthru))
cached_make_edge (edge_cache, bb, EXIT_BLOCK_PTR, EDGE_FALLTHRU);
- else if (bb->next_bb != EXIT_BLOCK_PTR)
+ else if (i + 1 < n_basic_blocks)
{
- rtx tmp = bb->next_bb->head;
+ rtx tmp = BLOCK_HEAD (i + 1);
if (GET_CODE (tmp) == NOTE)
tmp = next_nonnote_insn (tmp);
if (force_fallthru || insn == tmp)
- cached_make_edge (edge_cache, bb, bb->next_bb,
+ cached_make_edge (edge_cache, bb, BASIC_BLOCK (i + 1),
EDGE_FALLTHRU);
}
}
@@ -471,12 +470,12 @@ find_basic_blocks_1 (f)
rtx f;
{
rtx insn, next;
+ int i = 0;
rtx bb_note = NULL_RTX;
rtx lvl = NULL_RTX;
rtx trll = NULL_RTX;
rtx head = NULL_RTX;
rtx end = NULL_RTX;
- basic_block prev = ENTRY_BLOCK_PTR;
/* We process the instructions in a slightly different way than we did
previously. This is so that we see a NOTE_BASIC_BLOCK after we have
@@ -493,7 +492,7 @@ find_basic_blocks_1 (f)
if ((GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == BARRIER)
&& head)
{
- prev = create_basic_block_structure (last_basic_block++, head, end, bb_note, prev);
+ create_basic_block_structure (i++, head, end, bb_note);
head = end = NULL_RTX;
bb_note = NULL_RTX;
}
@@ -507,7 +506,7 @@ find_basic_blocks_1 (f)
if (head && control_flow_insn_p (insn))
{
- prev = create_basic_block_structure (last_basic_block++, head, end, bb_note, prev);
+ create_basic_block_structure (i++, head, end, bb_note);
head = end = NULL_RTX;
bb_note = NULL_RTX;
}
@@ -589,11 +588,11 @@ find_basic_blocks_1 (f)
}
if (head != NULL_RTX)
- create_basic_block_structure (last_basic_block++, head, end, bb_note, prev);
+ create_basic_block_structure (i++, head, end, bb_note);
else if (bb_note)
delete_insn (bb_note);
- if (last_basic_block != num_basic_blocks)
+ if (i != n_basic_blocks)
abort ();
label_value_list = lvl;
@@ -613,7 +612,6 @@ find_basic_blocks (f, nregs, file)
FILE *file ATTRIBUTE_UNUSED;
{
int max_uid;
- basic_block bb;
timevar_push (TV_CFG);
basic_block_for_insn = 0;
@@ -621,21 +619,20 @@ find_basic_blocks (f, nregs, file)
/* Flush out existing data. */
if (basic_block_info != NULL)
{
+ int i;
+
clear_edges ();
/* Clear bb->aux on all extant basic blocks. We'll use this as a
tag for reuse during create_basic_block, just in case some pass
copies around basic block notes improperly. */
- FOR_ALL_BB (bb)
- bb->aux = NULL;
+ for (i = 0; i < n_basic_blocks; ++i)
+ BASIC_BLOCK (i)->aux = NULL;
VARRAY_FREE (basic_block_info);
}
- num_basic_blocks = count_basic_blocks (f);
- last_basic_block = 0;
- ENTRY_BLOCK_PTR->next_bb = EXIT_BLOCK_PTR;
- EXIT_BLOCK_PTR->prev_bb = ENTRY_BLOCK_PTR;
+ n_basic_blocks = count_basic_blocks (f);
/* Size the basic block table. The actual structures will be allocated
by find_basic_blocks_1, since we want to keep the structure pointers
@@ -645,7 +642,7 @@ find_basic_blocks (f, nregs, file)
instructions at all until close to the end of compilation when we
actually lay them out. */
- VARRAY_BB_INIT (basic_block_info, num_basic_blocks, "basic_block_info");
+ VARRAY_BB_INIT (basic_block_info, n_basic_blocks, "basic_block_info");
find_basic_blocks_1 (f);
@@ -664,7 +661,7 @@ find_basic_blocks (f, nregs, file)
compute_bb_for_insn (max_uid);
/* Discover the edges of our cfg. */
- make_edges (label_value_list, ENTRY_BLOCK_PTR->next_bb, EXIT_BLOCK_PTR->prev_bb, 0);
+ make_edges (label_value_list, 0, n_basic_blocks - 1, 0);
/* Do very simple cleanup now, for the benefit of code that runs between
here and cleanup_cfg, e.g. thread_prologue_and_epilogue_insns. */
@@ -793,24 +790,25 @@ void
find_many_sub_basic_blocks (blocks)
sbitmap blocks;
{
- basic_block bb, min, max;
+ int i;
+ int min, max;
- FOR_ALL_BB (bb)
- SET_STATE (bb,
- TEST_BIT (blocks, bb->sindex) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
+ for (i = 0; i < n_basic_blocks; i++)
+ SET_STATE (BASIC_BLOCK (i),
+ TEST_BIT (blocks, i) ? BLOCK_TO_SPLIT : BLOCK_ORIGINAL);
- FOR_ALL_BB (bb)
- if (STATE (bb) == BLOCK_TO_SPLIT)
- find_bb_boundaries (bb);
+ for (i = 0; i < n_basic_blocks; i++)
+ if (STATE (BASIC_BLOCK (i)) == BLOCK_TO_SPLIT)
+ find_bb_boundaries (BASIC_BLOCK (i));
- FOR_ALL_BB (bb)
- if (STATE (bb) != BLOCK_ORIGINAL)
+ for (i = 0; i < n_basic_blocks; i++)
+ if (STATE (BASIC_BLOCK (i)) != BLOCK_ORIGINAL)
break;
- min = max = bb;
- for (; bb != EXIT_BLOCK_PTR; bb = bb->next_bb)
- if (STATE (bb) != BLOCK_ORIGINAL)
- max = bb;
+ min = max = i;
+ for (; i < n_basic_blocks; i++)
+ if (STATE (BASIC_BLOCK (i)) != BLOCK_ORIGINAL)
+ max = i;
/* Now re-scan and wire in all edges. This expect simple (conditional)
jumps at the end of each new basic blocks. */
@@ -818,28 +816,29 @@ find_many_sub_basic_blocks (blocks)
/* Update branch probabilities. Expect only (un)conditional jumps
to be created with only the forward edges. */
- FOR_BB_BETWEEN (bb, min, max->next_bb, next_bb)
+ for (i = min; i <= max; i++)
{
edge e;
+ basic_block b = BASIC_BLOCK (i);
- if (STATE (bb) == BLOCK_ORIGINAL)
+ if (STATE (b) == BLOCK_ORIGINAL)
continue;
- if (STATE (bb) == BLOCK_NEW)
+ if (STATE (b) == BLOCK_NEW)
{
- bb->count = 0;
- bb->frequency = 0;
- for (e = bb->pred; e; e=e->pred_next)
+ b->count = 0;
+ b->frequency = 0;
+ for (e = b->pred; e; e=e->pred_next)
{
- bb->count += e->count;
- bb->frequency += EDGE_FREQUENCY (e);
+ b->count += e->count;
+ b->frequency += EDGE_FREQUENCY (e);
}
}
- compute_outgoing_frequencies (bb);
+ compute_outgoing_frequencies (b);
}
- FOR_ALL_BB (bb)
- SET_STATE (bb, 0);
+ for (i = 0; i < n_basic_blocks; i++)
+ SET_STATE (BASIC_BLOCK (i), 0);
}
/* Like above but for single basic block only. */
@@ -848,12 +847,14 @@ void
find_sub_basic_blocks (bb)
basic_block bb;
{
- basic_block min, max, b;
- basic_block next = bb->next_bb;
+ int i;
+ int min, max;
+ basic_block next = (bb->index == n_basic_blocks - 1
+ ? NULL : BASIC_BLOCK (bb->index + 1));
- min = bb;
+ min = bb->index;
find_bb_boundaries (bb);
- max = next->prev_bb;
+ max = (next ? next->index : n_basic_blocks) - 1;
/* Now re-scan and wire in all edges. This expect simple (conditional)
jumps at the end of each new basic blocks. */
@@ -861,11 +862,12 @@ find_sub_basic_blocks (bb)
/* Update branch probabilities. Expect only (un)conditional jumps
to be created with only the forward edges. */
- FOR_BB_BETWEEN (b, min, max->next_bb, next_bb)
+ for (i = min; i <= max; i++)
{
edge e;
+ basic_block b = BASIC_BLOCK (i);
- if (b != min)
+ if (i != min)
{
b->count = 0;
b->frequency = 0;
diff --git a/gcc/cfgcleanup.c b/gcc/cfgcleanup.c
index 08a334a..fcf6944 100644
--- a/gcc/cfgcleanup.c
+++ b/gcc/cfgcleanup.c
@@ -147,7 +147,7 @@ try_simplify_condjump (cbranch_block)
unconditional jump. */
jump_block = cbranch_fallthru_edge->dest;
if (jump_block->pred->pred_next
- || jump_block->next_bb == EXIT_BLOCK_PTR
+ || jump_block->index == n_basic_blocks - 1
|| !FORWARDER_BLOCK_P (jump_block))
return false;
jump_dest_block = jump_block->succ->dest;
@@ -439,7 +439,7 @@ try_forward_edges (mode, b)
target = first = e->dest;
counter = 0;
- while (counter < num_basic_blocks)
+ while (counter < n_basic_blocks)
{
basic_block new_target = NULL;
bool new_target_threaded = false;
@@ -449,7 +449,7 @@ try_forward_edges (mode, b)
{
/* Bypass trivial infinite loops. */
if (target == target->succ->dest)
- counter = num_basic_blocks;
+ counter = n_basic_blocks;
new_target = target->succ->dest;
}
@@ -462,7 +462,7 @@ try_forward_edges (mode, b)
{
if (!threaded_edges)
threaded_edges = xmalloc (sizeof (*threaded_edges)
- * num_basic_blocks);
+ * n_basic_blocks);
else
{
int i;
@@ -474,7 +474,7 @@ try_forward_edges (mode, b)
break;
if (i < nthreaded_edges)
{
- counter = num_basic_blocks;
+ counter = n_basic_blocks;
break;
}
}
@@ -483,7 +483,7 @@ try_forward_edges (mode, b)
if (t->dest == b)
break;
- if (nthreaded_edges >= num_basic_blocks)
+ if (nthreaded_edges >= n_basic_blocks)
abort ();
threaded_edges[nthreaded_edges++] = t;
@@ -524,11 +524,11 @@ try_forward_edges (mode, b)
threaded |= new_target_threaded;
}
- if (counter >= num_basic_blocks)
+ if (counter >= n_basic_blocks)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Infinite loop in BB %i.\n",
- target->sindex);
+ target->index);
}
else if (target == first)
; /* We didn't do anything. */
@@ -552,7 +552,7 @@ try_forward_edges (mode, b)
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Forwarding edge %i->%i to %i failed.\n",
- b->sindex, e->dest->sindex, target->sindex);
+ b->index, e->dest->index, target->index);
continue;
}
@@ -688,6 +688,7 @@ merge_blocks_move_predecessor_nojumps (a, b)
basic_block a, b;
{
rtx barrier;
+ int index;
barrier = next_nonnote_insn (a->end);
if (GET_CODE (barrier) != BARRIER)
@@ -711,11 +712,16 @@ merge_blocks_move_predecessor_nojumps (a, b)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Moved block %d before %d and merged.\n",
- a->sindex, b->sindex);
+ a->index, b->index);
- /* Swap the records for the two blocks around. */
- unlink_block (a);
- link_block (a, b->prev_bb);
+ /* Swap the records for the two blocks around. Although we are deleting B,
+ A is now where B was and we want to compact the BB array from where
+ A used to be. */
+ BASIC_BLOCK (a->index) = b;
+ BASIC_BLOCK (b->index) = a;
+ index = a->index;
+ a->index = b->index;
+ b->index = index;
/* Now blocks A and B are contiguous. Merge them. */
merge_blocks_nomove (a, b);
@@ -770,7 +776,7 @@ merge_blocks_move_successor_nojumps (a, b)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Moved block %d after %d and merged.\n",
- b->sindex, a->sindex);
+ b->index, a->index);
/* Now blocks A and B are contiguous. Merge them. */
merge_blocks_nomove (a, b);
@@ -797,7 +803,7 @@ merge_blocks (e, b, c, mode)
/* If B has a fallthru edge to C, no need to move anything. */
if (e->flags & EDGE_FALLTHRU)
{
- int b_index = b->sindex, c_index = c->sindex;
+ int b_index = b->index, c_index = c->index;
merge_blocks_nomove (b, c);
update_forwarder_flag (b);
@@ -1224,7 +1230,7 @@ outgoing_edges_match (mode, bb1, bb2)
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Outcomes of branch in bb %i and %i differs to much (%i %i)\n",
- bb1->sindex, bb2->sindex, b1->probability, prob2);
+ bb1->index, bb2->index, b1->probability, prob2);
return false;
}
@@ -1232,7 +1238,7 @@ outgoing_edges_match (mode, bb1, bb2)
if (rtl_dump_file && match)
fprintf (rtl_dump_file, "Conditionals in bb %i and %i match.\n",
- bb1->sindex, bb2->sindex);
+ bb1->index, bb2->index);
return match;
}
@@ -1365,14 +1371,14 @@ try_crossjump_to_edge (mode, e1, e2)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Splitting bb %i before %i insns\n",
- src2->sindex, nmatch);
+ src2->index, nmatch);
redirect_to = split_block (src2, PREV_INSN (newpos2))->dest;
}
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Cross jumping from bb %i to bb %i; %i common insns\n",
- src1->sindex, src2->sindex, nmatch);
+ src1->index, src2->index, nmatch);
redirect_to->count += src1->count;
redirect_to->frequency += src1->frequency;
@@ -1533,7 +1539,6 @@ try_crossjump_bb (mode, bb)
for (e2 = bb->pred; e2; e2 = nexte2)
{
- basic_block foll;
nexte2 = e2->pred_next;
if (e2 == e)
@@ -1547,10 +1552,7 @@ try_crossjump_bb (mode, bb)
checks of crossjump(A,B). In order to prevent redundant
checks of crossjump(B,A), require that A be the block
with the lowest index. */
- for (foll = e->src; foll && foll != e2->src; foll = foll->next_bb)
- {
- }
- if (!foll)
+ if (e->src->index > e2->src->index)
continue;
if (try_crossjump_to_edge (mode, e, e2))
@@ -1572,16 +1574,16 @@ static bool
try_optimize_cfg (mode)
int mode;
{
+ int i;
bool changed_overall = false;
bool changed;
int iterations = 0;
- basic_block bb, b;
if (mode & CLEANUP_CROSSJUMP)
add_noreturn_fake_exit_edges ();
- FOR_ALL_BB (bb)
- update_forwarder_flag (bb);
+ for (i = 0; i < n_basic_blocks; i++)
+ update_forwarder_flag (BASIC_BLOCK (i));
if (mode & CLEANUP_UPDATE_LIFE)
clear_bb_flags ();
@@ -1601,19 +1603,19 @@ try_optimize_cfg (mode)
"\n\ntry_optimize_cfg iteration %i\n\n",
iterations);
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR;)
+ for (i = 0; i < n_basic_blocks;)
{
- basic_block c;
+ basic_block c, b = BASIC_BLOCK (i);
edge s;
bool changed_here = false;
/* Delete trivially dead basic blocks. */
while (b->pred == NULL)
{
- c = b->prev_bb;
+ c = BASIC_BLOCK (b->index - 1);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Deleting block %i.\n",
- b->sindex);
+ b->index);
flow_delete_block (b);
changed = true;
@@ -1646,7 +1648,7 @@ try_optimize_cfg (mode)
delete_insn_chain (label, label);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Deleted label in block %i.\n",
- b->sindex);
+ b->index);
}
/* If we fall through an empty block, we can remove it. */
@@ -1657,14 +1659,14 @@ try_optimize_cfg (mode)
/* Note that forwarder_block_p true ensures that
there is a successor for this block. */
&& (b->succ->flags & EDGE_FALLTHRU)
- && num_basic_blocks > 1)
+ && n_basic_blocks > 1)
{
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Deleting fallthru block %i.\n",
- b->sindex);
+ b->index);
- c = b->prev_bb == ENTRY_BLOCK_PTR ? b->next_bb : b->prev_bb;
+ c = BASIC_BLOCK (b->index ? b->index - 1 : 1);
redirect_edge_succ_nodup (b->pred, b->succ->dest);
flow_delete_block (b);
changed = true;
@@ -1716,7 +1718,7 @@ try_optimize_cfg (mode)
/* Don't get confused by the index shift caused by
deleting blocks. */
if (!changed_here)
- b = b->next_bb;
+ i = b->index + 1;
else
changed = true;
}
@@ -1748,22 +1750,33 @@ try_optimize_cfg (mode)
bool
delete_unreachable_blocks ()
{
+ int i, j;
bool changed = false;
- basic_block b, next_bb;
find_unreachable_blocks ();
- /* Delete all unreachable basic blocks. */
+ /* Delete all unreachable basic blocks. Do compaction concurrently,
+ as otherwise we can wind up with O(N^2) behaviour here when we
+ have oodles of dead code. */
- for (b = ENTRY_BLOCK_PTR->next_bb; b != EXIT_BLOCK_PTR; b = next_bb)
+ for (i = j = 0; i < n_basic_blocks; ++i)
{
- next_bb = b->next_bb;
+ basic_block b = BASIC_BLOCK (i);
+
if (!(b->flags & BB_REACHABLE))
{
- flow_delete_block (b);
+ flow_delete_block_noexpunge (b);
+ expunge_block_nocompact (b);
changed = true;
}
+ else
+ {
+ BASIC_BLOCK (j) = b;
+ b->index = j++;
+ }
}
+ n_basic_blocks = j;
+ basic_block_info->num_elements = j;
if (changed)
tidy_fallthru_edges ();
@@ -1788,9 +1801,6 @@ cleanup_cfg (mode)
&& !reload_completed)
delete_trivially_dead_insns (get_insns(), max_reg_num ());
}
-
- compact_blocks ();
-
while (try_optimize_cfg (mode))
{
delete_unreachable_blocks (), changed = true;
diff --git a/gcc/cfglayout.c b/gcc/cfglayout.c
index 0421a72..2820f0d 100644
--- a/gcc/cfglayout.c
+++ b/gcc/cfglayout.c
@@ -86,8 +86,8 @@ skip_insns_after_block (bb)
rtx insn, last_insn, next_head, prev;
next_head = NULL_RTX;
- if (bb->next_bb != EXIT_BLOCK_PTR)
- next_head = bb->next_bb->head;
+ if (bb->index + 1 != n_basic_blocks)
+ next_head = BASIC_BLOCK (bb->index + 1)->head;
for (last_insn = insn = bb->end; (insn = NEXT_INSN (insn)) != 0; )
{
@@ -176,7 +176,7 @@ label_for_bb (bb)
if (GET_CODE (label) != CODE_LABEL)
{
if (rtl_dump_file)
- fprintf (rtl_dump_file, "Emitting label for block %d\n", bb->sindex);
+ fprintf (rtl_dump_file, "Emitting label for block %d\n", bb->index);
label = block_label (bb);
}
@@ -191,10 +191,11 @@ static void
record_effective_endpoints ()
{
rtx next_insn = get_insns ();
- basic_block bb;
-
- FOR_ALL_BB (bb)
+ int i;
+
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx end;
if (PREV_INSN (bb->head) && next_insn != bb->head)
@@ -356,15 +357,15 @@ scope_to_insns_finalize ()
static void
fixup_reorder_chain ()
{
- basic_block bb, prev_bb;
+ basic_block bb;
int index;
rtx insn = NULL;
/* First do the bulk reordering -- rechain the blocks without regard to
the needed changes to jumps and labels. */
- for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
- bb;
+ for (bb = BASIC_BLOCK (0), index = 0;
+ bb != 0;
bb = RBI (bb)->next, index++)
{
if (RBI (bb)->header)
@@ -393,7 +394,7 @@ fixup_reorder_chain ()
}
}
- if (index != num_basic_blocks)
+ if (index != n_basic_blocks)
abort ();
NEXT_INSN (insn) = function_footer;
@@ -411,7 +412,7 @@ fixup_reorder_chain ()
/* Now add jumps and labels as needed to match the blocks new
outgoing edges. */
- for (bb = ENTRY_BLOCK_PTR->next_bb; bb ; bb = RBI (bb)->next)
+ for (bb = BASIC_BLOCK (0); bb ; bb = RBI (bb)->next)
{
edge e_fall, e_taken, e;
rtx bb_end_insn;
@@ -522,39 +523,29 @@ fixup_reorder_chain ()
}
/* Put basic_block_info in the new order. */
+
if (rtl_dump_file)
{
fprintf (rtl_dump_file, "Reordered sequence:\n");
- for (bb = ENTRY_BLOCK_PTR->next_bb, index = 0;
- bb;
- bb = RBI (bb)->next, index ++)
+ for (bb = BASIC_BLOCK (0), index = 0; bb; bb = RBI (bb)->next, index ++)
{
fprintf (rtl_dump_file, " %i ", index);
if (RBI (bb)->original)
fprintf (rtl_dump_file, "duplicate of %i ",
- RBI (bb)->original->sindex);
+ RBI (bb)->original->index);
else if (forwarder_block_p (bb) && GET_CODE (bb->head) != CODE_LABEL)
fprintf (rtl_dump_file, "compensation ");
else
- fprintf (rtl_dump_file, "bb %i ", bb->sindex);
+ fprintf (rtl_dump_file, "bb %i ", bb->index);
fprintf (rtl_dump_file, " [%i]\n", bb->frequency);
}
}
- prev_bb = ENTRY_BLOCK_PTR;
- bb = ENTRY_BLOCK_PTR->next_bb;
- index = 0;
-
- for (; bb; prev_bb = bb, bb = RBI (bb)->next, index++)
+ for (bb = BASIC_BLOCK (0), index = 0; bb; bb = RBI (bb)->next, index ++)
{
- bb->sindex = index;
+ bb->index = index;
BASIC_BLOCK (index) = bb;
-
- bb->prev_bb = prev_bb;
- prev_bb->next_bb = bb;
}
- prev_bb->next_bb = EXIT_BLOCK_PTR;
- EXIT_BLOCK_PTR->prev_bb = prev_bb;
}
/* Perform sanity checks on the insn chain.
@@ -597,10 +588,11 @@ verify_insn_chain ()
static void
cleanup_unconditional_jumps ()
{
- basic_block bb;
-
- FOR_ALL_BB (bb)
+ int i;
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
+
if (!bb->succ)
continue;
if (bb->succ->flags & EDGE_FALLTHRU)
@@ -608,14 +600,13 @@ cleanup_unconditional_jumps ()
if (!bb->succ->succ_next)
{
rtx insn;
- if (GET_CODE (bb->head) != CODE_LABEL && forwarder_block_p (bb)
- && bb->prev_bb != ENTRY_BLOCK_PTR)
+ if (GET_CODE (bb->head) != CODE_LABEL && forwarder_block_p (bb) && i)
{
- basic_block prev = bb->prev_bb;
+ basic_block prev = BASIC_BLOCK (--i);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Removing forwarder BB %i\n",
- bb->sindex);
+ bb->index);
redirect_edge_succ (bb->pred, bb->succ->dest);
flow_delete_block (bb);
@@ -627,7 +618,7 @@ cleanup_unconditional_jumps ()
if (rtl_dump_file)
fprintf (rtl_dump_file, "Removing jump %i in BB %i\n",
- INSN_UID (jump), bb->sindex);
+ INSN_UID (jump), bb->index);
delete_insn (jump);
bb->succ->flags |= EDGE_FALLTHRU;
}
@@ -672,7 +663,7 @@ fixup_fallthru_exit_predecessor ()
if (bb && RBI (bb)->next)
{
- basic_block c = ENTRY_BLOCK_PTR->next_bb;
+ basic_block c = BASIC_BLOCK (0);
while (RBI (c)->next != bb)
c = RBI (c)->next;
@@ -822,14 +813,14 @@ cfg_layout_redirect_edge (e, dest)
edge e;
basic_block dest;
{
+ int old_index = dest->index;
basic_block src = e->src;
- basic_block old_next_bb = src->next_bb;
/* Redirect_edge_and_branch may decide to turn branch into fallthru edge
in the case the basic block appears to be in sequence. Avoid this
transformation. */
- src->next_bb = NULL;
+ dest->index = n_basic_blocks + 1;
if (e->flags & EDGE_FALLTHRU)
{
/* In case we are redirecting fallthru edge to the branch edge
@@ -855,7 +846,7 @@ cfg_layout_redirect_edge (e, dest)
delete_barrier (NEXT_INSN (src->end));
src->succ->flags |= EDGE_FALLTHRU;
}
- src->next_bb = old_next_bb;
+ dest->index = old_index;
}
/* Create an duplicate of the basic block BB and redirect edge E into it. */
@@ -880,9 +871,8 @@ cfg_layout_duplicate_bb (bb, e)
#endif
insn = duplicate_insn_chain (bb->head, bb->end);
- new_bb = create_basic_block (insn,
- insn ? get_last_insn () : NULL,
- EXIT_BLOCK_PTR->prev_bb);
+ new_bb = create_basic_block (n_basic_blocks, insn,
+ insn ? get_last_insn () : NULL);
alloc_aux_for_block (new_bb, sizeof (struct reorder_block_def));
if (RBI (bb)->header)
diff --git a/gcc/cfgloop.c b/gcc/cfgloop.c
index 3add736..2bd0d4c 100644
--- a/gcc/cfgloop.c
+++ b/gcc/cfgloop.c
@@ -50,18 +50,17 @@ flow_loops_cfg_dump (loops, file)
FILE *file;
{
int i;
- basic_block bb;
if (! loops->num || ! file || ! loops->cfg.dom)
return;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
edge succ;
- fprintf (file, ";; %d succs { ", bb->sindex);
- for (succ = bb->succ; succ; succ = succ->succ_next)
- fprintf (file, "%d ", succ->dest->sindex);
+ fprintf (file, ";; %d succs { ", i);
+ for (succ = BASIC_BLOCK (i)->succ; succ; succ = succ->succ_next)
+ fprintf (file, "%d ", succ->dest->index);
flow_nodes_print ("} dom", loops->cfg.dom[i], file);
}
@@ -69,7 +68,7 @@ flow_loops_cfg_dump (loops, file)
if (loops->cfg.dfs_order)
{
fputs (";; DFS order: ", file);
- for (i = 0; i < num_basic_blocks; i++)
+ for (i = 0; i < n_basic_blocks; i++)
fprintf (file, "%d ", loops->cfg.dfs_order[i]);
fputs ("\n", file);
@@ -79,7 +78,7 @@ flow_loops_cfg_dump (loops, file)
if (loops->cfg.rc_order)
{
fputs (";; RC order: ", file);
- for (i = 0; i < num_basic_blocks; i++)
+ for (i = 0; i < n_basic_blocks; i++)
fprintf (file, "%d ", loops->cfg.rc_order[i]);
fputs ("\n", file);
@@ -119,9 +118,9 @@ flow_loop_dump (loop, file, loop_dump_aux, verbose)
loop->shared ? " shared" : "", loop->invalid ? " invalid" : "");
fprintf (file, ";; header %d, latch %d, pre-header %d, first %d, last %d\n",
- loop->header->sindex, loop->latch->sindex,
- loop->pre_header ? loop->pre_header->sindex : -1,
- loop->first->sindex, loop->last->sindex);
+ loop->header->index, loop->latch->index,
+ loop->pre_header ? loop->pre_header->index : -1,
+ loop->first->index, loop->last->index);
fprintf (file, ";; depth %d, level %d, outer %ld\n",
loop->depth, loop->level,
(long) (loop->outer ? loop->outer->num : -1));
@@ -186,7 +185,7 @@ flow_loops_dump (loops, file, loop_dump_aux, verbose)
smaller ? oloop : loop);
fprintf (file,
";; loop header %d shared by loops %d, %d %s\n",
- loop->header->sindex, i, j,
+ loop->header->index, i, j,
disjoint ? "disjoint" : "nested");
}
}
@@ -260,7 +259,7 @@ flow_loop_entry_edges_find (header, nodes, entry_edges)
{
basic_block src = e->src;
- if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->sindex))
+ if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->index))
num_entries++;
}
@@ -274,7 +273,7 @@ flow_loop_entry_edges_find (header, nodes, entry_edges)
{
basic_block src = e->src;
- if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->sindex))
+ if (src == ENTRY_BLOCK_PTR || ! TEST_BIT (nodes, src->index))
(*entry_edges)[num_entries++] = e;
}
@@ -306,7 +305,7 @@ flow_loop_exit_edges_find (nodes, exit_edges)
{
basic_block dest = e->dest;
- if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->sindex))
+ if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->index))
num_exits++;
}
});
@@ -323,7 +322,7 @@ flow_loop_exit_edges_find (nodes, exit_edges)
{
basic_block dest = e->dest;
- if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->sindex))
+ if (dest == EXIT_BLOCK_PTR || ! TEST_BIT (nodes, dest->index))
(*exit_edges)[num_exits++] = e;
}
});
@@ -345,19 +344,19 @@ flow_loop_nodes_find (header, latch, nodes)
int sp;
int num_nodes = 0;
- stack = (basic_block *) xmalloc (num_basic_blocks * sizeof (basic_block));
+ stack = (basic_block *) xmalloc (n_basic_blocks * sizeof (basic_block));
sp = 0;
/* Start with only the loop header in the set of loop nodes. */
sbitmap_zero (nodes);
- SET_BIT (nodes, header->sindex);
+ SET_BIT (nodes, header->index);
num_nodes++;
header->loop_depth++;
/* Push the loop latch on to the stack. */
- if (! TEST_BIT (nodes, latch->sindex))
+ if (! TEST_BIT (nodes, latch->index))
{
- SET_BIT (nodes, latch->sindex);
+ SET_BIT (nodes, latch->index);
latch->loop_depth++;
num_nodes++;
stack[sp++] = latch;
@@ -376,9 +375,9 @@ flow_loop_nodes_find (header, latch, nodes)
/* If each ancestor not marked as part of loop, add to set of
loop nodes and push on to stack. */
if (ancestor != ENTRY_BLOCK_PTR
- && ! TEST_BIT (nodes, ancestor->sindex))
+ && ! TEST_BIT (nodes, ancestor->index))
{
- SET_BIT (nodes, ancestor->sindex);
+ SET_BIT (nodes, ancestor->index);
ancestor->loop_depth++;
num_nodes++;
stack[sp++] = ancestor;
@@ -445,7 +444,7 @@ flow_loop_pre_header_find (header, dom)
basic_block node = e->src;
if (node != ENTRY_BLOCK_PTR
- && ! TEST_BIT (dom[node->sindex], header->sindex))
+ && ! TEST_BIT (dom[node->index], header->index))
{
if (pre_header == NULL)
pre_header = node;
@@ -600,15 +599,15 @@ flow_loop_scan (loops, loop, flags)
/* Determine which loop nodes dominate all the exits
of the loop. */
- loop->exits_doms = sbitmap_alloc (last_basic_block);
+ loop->exits_doms = sbitmap_alloc (n_basic_blocks);
sbitmap_copy (loop->exits_doms, loop->nodes);
for (j = 0; j < loop->num_exits; j++)
sbitmap_a_and_b (loop->exits_doms, loop->exits_doms,
- loops->cfg.dom[loop->exit_edges[j]->src->sindex]);
+ loops->cfg.dom[loop->exit_edges[j]->src->index]);
/* The header of a natural loop must dominate
all exits. */
- if (! TEST_BIT (loop->exits_doms, loop->header->sindex))
+ if (! TEST_BIT (loop->exits_doms, loop->header->index))
abort ();
}
@@ -636,14 +635,14 @@ flow_loops_find (loops, flags)
struct loops *loops;
int flags;
{
- int i, b;
+ int i;
+ int b;
int num_loops;
edge e;
sbitmap headers;
sbitmap *dom;
int *dfs_order;
int *rc_order;
- basic_block header;
/* This function cannot be repeatedly called with different
flags to build up the loop information. The loop tree
@@ -655,21 +654,24 @@ flow_loops_find (loops, flags)
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (num_basic_blocks == 0)
+ if (n_basic_blocks == 0)
return 0;
dfs_order = NULL;
rc_order = NULL;
/* Compute the dominators. */
- dom = sbitmap_vector_alloc (last_basic_block, last_basic_block);
+ dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
calculate_dominance_info (NULL, dom, CDI_DOMINATORS);
/* Count the number of loop edges (back edges). This should be the
same as the number of natural loops. */
num_loops = 0;
- FOR_ALL_BB (header)
+ for (b = 0; b < n_basic_blocks; b++)
{
+ basic_block header;
+
+ header = BASIC_BLOCK (b);
header->loop_depth = 0;
for (e = header->pred; e; e = e->pred_next)
@@ -682,7 +684,10 @@ flow_loops_find (loops, flags)
loop. It also has single back edge to the header
from a latch node. Note that multiple natural loops
may share the same header. */
- if (latch != ENTRY_BLOCK_PTR && TEST_BIT (dom[latch->sindex], header->sindex))
+ if (b != header->index)
+ abort ();
+
+ if (latch != ENTRY_BLOCK_PTR && TEST_BIT (dom[latch->index], b))
num_loops++;
}
}
@@ -691,8 +696,8 @@ flow_loops_find (loops, flags)
{
/* Compute depth first search order of the CFG so that outer
natural loops will be found before inner natural loops. */
- dfs_order = (int *) xmalloc (num_basic_blocks * sizeof (int));
- rc_order = (int *) xmalloc (num_basic_blocks * sizeof (int));
+ dfs_order = (int *) xmalloc (n_basic_blocks * sizeof (int));
+ rc_order = (int *) xmalloc (n_basic_blocks * sizeof (int));
flow_depth_first_order_compute (dfs_order, rc_order);
/* Save CFG derived information to avoid recomputing it. */
@@ -704,16 +709,16 @@ flow_loops_find (loops, flags)
loops->array
= (struct loop *) xcalloc (num_loops, sizeof (struct loop));
- headers = sbitmap_alloc (last_basic_block);
+ headers = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (headers);
- loops->shared_headers = sbitmap_alloc (last_basic_block);
+ loops->shared_headers = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (loops->shared_headers);
/* Find and record information about all the natural loops
in the CFG. */
num_loops = 0;
- for (b = num_basic_blocks - 1; b >= 0; b--)
+ for (b = n_basic_blocks - 1; b >= 0; b--)
{
basic_block latch;
@@ -733,7 +738,7 @@ flow_loops_find (loops, flags)
latch node. Note that multiple natural loops may share
the same header. */
if (header != EXIT_BLOCK_PTR
- && TEST_BIT (dom[latch->sindex], header->sindex))
+ && TEST_BIT (dom[latch->index], header->index))
{
struct loop *loop;
@@ -754,12 +759,12 @@ flow_loops_find (loops, flags)
/* Keep track of blocks that are loop headers so
that we can tell which loops should be merged. */
- if (TEST_BIT (headers, loop->header->sindex))
- SET_BIT (loops->shared_headers, loop->header->sindex);
- SET_BIT (headers, loop->header->sindex);
+ if (TEST_BIT (headers, loop->header->index))
+ SET_BIT (loops->shared_headers, loop->header->index);
+ SET_BIT (headers, loop->header->index);
/* Find nodes contained within the loop. */
- loop->nodes = sbitmap_alloc (last_basic_block);
+ loop->nodes = sbitmap_alloc (n_basic_blocks);
loop->num_nodes
= flow_loop_nodes_find (loop->header, loop->latch, loop->nodes);
@@ -780,7 +785,7 @@ flow_loops_find (loops, flags)
loops and should be merged. For now just mark loops that share
headers. */
for (i = 0; i < num_loops; i++)
- if (TEST_BIT (loops->shared_headers, loops->array[i].header->sindex))
+ if (TEST_BIT (loops->shared_headers, loops->array[i].header->index))
loops->array[i].shared = 1;
sbitmap_free (headers);
@@ -827,5 +832,5 @@ flow_loop_outside_edge_p (loop, e)
abort ();
return (e->src == ENTRY_BLOCK_PTR)
- || ! TEST_BIT (loop->nodes, e->src->sindex);
+ || ! TEST_BIT (loop->nodes, e->src->index);
}
diff --git a/gcc/cfgrtl.c b/gcc/cfgrtl.c
index 5210b03..844f5df 100644
--- a/gcc/cfgrtl.c
+++ b/gcc/cfgrtl.c
@@ -248,14 +248,12 @@ delete_insn_chain_and_edges (first, last)
the note and basic block struct in BB_NOTE, if any and do not grow
BASIC_BLOCK chain and should be used directly only by CFG construction code.
END can be NULL in to create new empty basic block before HEAD. Both END
- and HEAD can be NULL to create basic block at the end of INSN chain.
- AFTER is the basic block we should be put after. */
+ and HEAD can be NULL to create basic block at the end of INSN chain. */
basic_block
-create_basic_block_structure (index, head, end, bb_note, after)
+create_basic_block_structure (index, head, end, bb_note)
int index;
rtx head, end, bb_note;
- basic_block after;
{
basic_block bb;
@@ -311,9 +309,8 @@ create_basic_block_structure (index, head, end, bb_note, after)
bb->head = head;
bb->end = end;
- bb->sindex = index;
+ bb->index = index;
bb->flags = BB_NEW;
- link_block (bb, after);
BASIC_BLOCK (index) = bb;
if (basic_block_for_insn)
update_bb_for_insn (bb);
@@ -326,23 +323,33 @@ create_basic_block_structure (index, head, end, bb_note, after)
}
/* Create new basic block consisting of instructions in between HEAD and END
- and place it to the BB chain after block AFTER. END can be NULL in to
+ and place it to the BB chain at position INDEX. END can be NULL in to
create new empty basic block before HEAD. Both END and HEAD can be NULL to
create basic block at the end of INSN chain. */
basic_block
-create_basic_block (head, end, after)
+create_basic_block (index, head, end)
+ int index;
rtx head, end;
- basic_block after;
{
basic_block bb;
- int index = last_basic_block++;
+ int i;
+
+ /* Place the new block just after the block being split. */
+ VARRAY_GROW (basic_block_info, ++n_basic_blocks);
+
+ /* Some parts of the compiler expect blocks to be number in
+ sequential order so insert the new block immediately after the
+ block being split.. */
+ for (i = n_basic_blocks - 1; i > index; --i)
+ {
+ basic_block tmp = BASIC_BLOCK (i - 1);
- /* Place the new block to the end. */
- VARRAY_GROW (basic_block_info, last_basic_block);
+ BASIC_BLOCK (i) = tmp;
+ tmp->index = i;
+ }
- num_basic_blocks++;
- bb = create_basic_block_structure (index, head, end, NULL, after);
+ bb = create_basic_block_structure (index, head, end, NULL);
bb->aux = NULL;
return bb;
}
@@ -424,7 +431,7 @@ flow_delete_block (b)
{
int deleted_handler = flow_delete_block_noexpunge (b);
- /* Remove the basic block from the array. */
+ /* Remove the basic block from the array, and compact behind it. */
expunge_block (b);
return deleted_handler;
@@ -437,15 +444,16 @@ void
compute_bb_for_insn (max)
int max;
{
- basic_block bb;
+ int i;
if (basic_block_for_insn)
VARRAY_FREE (basic_block_for_insn);
VARRAY_BB_INIT (basic_block_for_insn, max, "basic_block_for_insn");
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; ++i)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx end = bb->end;
rtx insn;
@@ -529,7 +537,7 @@ split_block (bb, insn)
return 0;
/* Create the new basic block. */
- new_bb = create_basic_block (NEXT_INSN (insn), bb->end, bb);
+ new_bb = create_basic_block (bb->index + 1, NEXT_INSN (insn), bb->end);
new_bb->count = bb->count;
new_bb->frequency = bb->frequency;
new_bb->loop_depth = bb->loop_depth;
@@ -764,7 +772,7 @@ try_redirect_by_replacing_jump (e, target)
return false;
if (rtl_dump_file)
fprintf (rtl_dump_file, "Redirecting jump %i from %i to %i.\n",
- INSN_UID (insn), e->dest->sindex, target->sindex);
+ INSN_UID (insn), e->dest->index, target->index);
if (!redirect_jump (insn, block_label (target), 0))
{
if (target == EXIT_BLOCK_PTR)
@@ -961,7 +969,7 @@ redirect_edge_and_branch (e, target)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Edge %i->%i redirected to %i\n",
- e->src->sindex, e->dest->sindex, target->sindex);
+ e->src->index, e->dest->index, target->index);
if (e->dest != target)
redirect_edge_succ_nodup (e, target);
@@ -990,7 +998,7 @@ force_nonfallthru_and_redirect (e, target)
/* We can't redirect the entry block. Create an empty block at the
start of the function which we use to add the new jump. */
edge *pe1;
- basic_block bb = create_basic_block (e->dest->head, NULL, ENTRY_BLOCK_PTR);
+ basic_block bb = create_basic_block (0, e->dest->head, NULL);
/* Change the existing edge's source to be the new block, and add
a new edge from the entry block to the new block. */
@@ -1010,7 +1018,8 @@ force_nonfallthru_and_redirect (e, target)
{
/* Create the new structures. */
note = last_loop_beg_note (e->src->end);
- jump_block = create_basic_block (NEXT_INSN (note), NULL, e->src);
+ jump_block
+ = create_basic_block (e->src->index + 1, NEXT_INSN (note), NULL);
jump_block->count = e->count;
jump_block->frequency = EDGE_FREQUENCY (e);
jump_block->loop_depth = target->loop_depth;
@@ -1155,11 +1164,12 @@ tidy_fallthru_edge (e, b, c)
void
tidy_fallthru_edges ()
{
- basic_block b, c;
+ int i;
- for (b = ENTRY_BLOCK_PTR->next_bb, c = b->next_bb;
- c && c != EXIT_BLOCK_PTR; b = c, c = c->next_bb)
+ for (i = 1; i < n_basic_blocks; i++)
{
+ basic_block b = BASIC_BLOCK (i - 1);
+ basic_block c = BASIC_BLOCK (i);
edge s;
/* We care about simple conditional or unconditional jumps with
@@ -1194,17 +1204,11 @@ back_edge_of_syntactic_loop_p (bb1, bb2)
{
rtx insn;
int count = 0;
- basic_block bb;
- if (bb1 == bb2)
- return true;
-
- for (bb = bb1; bb && bb != bb2; bb = bb->next_bb)
- {
- }
-
- if (!bb)
+ if (bb1->index > bb2->index)
return false;
+ else if (bb1->index == bb2->index)
+ return true;
for (insn = bb1->end; insn != bb2->head && count >= 0;
insn = NEXT_INSN (insn))
@@ -1282,7 +1286,8 @@ split_edge (edge_in)
else
before = NULL_RTX;
- bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
+ bb = create_basic_block (edge_in->dest == EXIT_BLOCK_PTR ? n_basic_blocks
+ : edge_in->dest->index, before, NULL);
bb->count = edge_in->count;
bb->frequency = EDGE_FREQUENCY (edge_in);
@@ -1453,7 +1458,7 @@ commit_one_edge_insertion (e, watch_calls)
e->flags &= ~EDGE_FALLTHRU;
emit_barrier_after (last);
-
+
if (before)
delete_insn (before);
}
@@ -1476,8 +1481,8 @@ commit_edge_insertions ()
#endif
i = -1;
-
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ bb = ENTRY_BLOCK_PTR;
+ while (1)
{
edge e, next;
@@ -1487,6 +1492,10 @@ commit_edge_insertions ()
if (e->insns)
commit_one_edge_insertion (e, false);
}
+
+ if (++i >= n_basic_blocks)
+ break;
+ bb = BASIC_BLOCK (i);
}
}
@@ -1504,7 +1513,8 @@ commit_edge_insertions_watch_calls ()
#endif
i = -1;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
+ bb = ENTRY_BLOCK_PTR;
+ while (1)
{
edge e, next;
@@ -1514,6 +1524,10 @@ commit_edge_insertions_watch_calls ()
if (e->insns)
commit_one_edge_insertion (e, true);
}
+
+ if (++i >= n_basic_blocks)
+ break;
+ bb = BASIC_BLOCK (i);
}
}
@@ -1529,7 +1543,7 @@ dump_bb (bb, outf)
edge e;
fprintf (outf, ";; Basic block %d, loop depth %d, count ",
- bb->sindex, bb->loop_depth);
+ bb->index, bb->loop_depth);
fprintf (outf, HOST_WIDEST_INT_PRINT_DEC, (HOST_WIDEST_INT) bb->count);
putc ('\n', outf);
@@ -1584,6 +1598,7 @@ print_rtl_with_bb (outf, rtx_first)
fprintf (outf, "(nil)\n");
else
{
+ int i;
enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
int max_uid = get_max_uid ();
basic_block *start
@@ -1592,10 +1607,10 @@ print_rtl_with_bb (outf, rtx_first)
= (basic_block *) xcalloc (max_uid, sizeof (basic_block));
enum bb_state *in_bb_p
= (enum bb_state *) xcalloc (max_uid, sizeof (enum bb_state));
- basic_block bb;
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; i--)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx x;
start[INSN_UID (bb->head)] = bb;
@@ -1616,11 +1631,12 @@ print_rtl_with_bb (outf, rtx_first)
for (tmp_rtx = rtx_first; NULL != tmp_rtx; tmp_rtx = NEXT_INSN (tmp_rtx))
{
int did_output;
+ basic_block bb;
if ((bb = start[INSN_UID (tmp_rtx)]) != NULL)
{
fprintf (outf, ";; Start of basic block %d, registers live:",
- bb->sindex);
+ bb->index);
dump_regset (bb->global_live_at_start, outf);
putc ('\n', outf);
}
@@ -1637,7 +1653,7 @@ print_rtl_with_bb (outf, rtx_first)
if ((bb = end[INSN_UID (tmp_rtx)]) != NULL)
{
fprintf (outf, ";; End of basic block %d, registers live:\n",
- bb->sindex);
+ bb->index);
dump_regset (bb->global_live_at_end, outf);
putc ('\n', outf);
}
@@ -1702,37 +1718,16 @@ verify_flow_info ()
basic_block *bb_info, *last_visited;
size_t *edge_checksum;
rtx x;
- int num_bb_notes, err = 0;
- basic_block bb, last_bb_seen;
+ int i, last_bb_num_seen, num_bb_notes, err = 0;
bb_info = (basic_block *) xcalloc (max_uid, sizeof (basic_block));
- last_visited = (basic_block *) xcalloc (last_basic_block + 2,
+ last_visited = (basic_block *) xcalloc (n_basic_blocks + 2,
sizeof (basic_block));
- edge_checksum = (size_t *) xcalloc (last_basic_block + 2, sizeof (size_t));
-
- /* Check bb chain & numbers. */
- last_bb_seen = ENTRY_BLOCK_PTR;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb, NULL, next_bb)
- {
- if (bb != EXIT_BLOCK_PTR
- && bb != BASIC_BLOCK (bb->sindex))
- {
- error ("bb %d on wrong place", bb->sindex);
- err = 1;
- }
+ edge_checksum = (size_t *) xcalloc (n_basic_blocks + 2, sizeof (size_t));
- if (bb->prev_bb != last_bb_seen)
- {
- error ("prev_bb of %d should be %d, not %d",
- bb->sindex, last_bb_seen->sindex, bb->prev_bb->sindex);
- err = 1;
- }
-
- last_bb_seen = bb;
- }
-
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; i--)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx head = bb->head;
rtx end = bb->end;
@@ -1744,7 +1739,7 @@ verify_flow_info ()
if (!x)
{
error ("end insn %d for block %d not found in the insn stream",
- INSN_UID (end), bb->sindex);
+ INSN_UID (end), bb->index);
err = 1;
}
@@ -1758,7 +1753,7 @@ verify_flow_info ()
if (bb_info[INSN_UID (x)] != NULL)
{
error ("insn %d is in multiple basic blocks (%d and %d)",
- INSN_UID (x), bb->sindex, bb_info[INSN_UID (x)]->sindex);
+ INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
err = 1;
}
@@ -1770,7 +1765,7 @@ verify_flow_info ()
if (!x)
{
error ("head insn %d for block %d not found in the insn stream",
- INSN_UID (head), bb->sindex);
+ INSN_UID (head), bb->index);
err = 1;
}
@@ -1778,8 +1773,9 @@ verify_flow_info ()
}
/* Now check the basic blocks (boundaries etc.) */
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; i--)
{
+ basic_block bb = BASIC_BLOCK (i);
int n_fallthru = 0, n_eh = 0, n_call = 0, n_abnormal = 0, n_branch = 0;
edge e;
rtx note;
@@ -1799,37 +1795,37 @@ verify_flow_info ()
if (bb->count < 0)
{
error ("verify_flow_info: Wrong count of block %i %i",
- bb->sindex, (int)bb->count);
+ bb->index, (int)bb->count);
err = 1;
}
if (bb->frequency < 0)
{
error ("verify_flow_info: Wrong frequency of block %i %i",
- bb->sindex, bb->frequency);
+ bb->index, bb->frequency);
err = 1;
}
for (e = bb->succ; e; e = e->succ_next)
{
- if (last_visited [e->dest->sindex + 2] == bb)
+ if (last_visited [e->dest->index + 2] == bb)
{
error ("verify_flow_info: Duplicate edge %i->%i",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
err = 1;
}
if (e->probability < 0 || e->probability > REG_BR_PROB_BASE)
{
error ("verify_flow_info: Wrong probability of edge %i->%i %i",
- e->src->sindex, e->dest->sindex, e->probability);
+ e->src->index, e->dest->index, e->probability);
err = 1;
}
if (e->count < 0)
{
error ("verify_flow_info: Wrong count of edge %i->%i %i",
- e->src->sindex, e->dest->sindex, (int)e->count);
+ e->src->index, e->dest->index, (int)e->count);
err = 1;
}
- last_visited [e->dest->sindex + 2] = bb;
+ last_visited [e->dest->index + 2] = bb;
if (e->flags & EDGE_FALLTHRU)
n_fallthru++;
@@ -1851,11 +1847,11 @@ verify_flow_info ()
{
rtx insn;
- if (e->src->next_bb != e->dest)
+ if (e->src->index + 1 != e->dest->index)
{
error
("verify_flow_info: Incorrect blocks for fallthru %i->%i",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
err = 1;
}
else
@@ -1870,7 +1866,7 @@ verify_flow_info ()
)
{
error ("verify_flow_info: Incorrect fallthru %i->%i",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
fatal_insn ("wrong insn in the fallthru edge", insn);
err = 1;
}
@@ -1879,7 +1875,7 @@ verify_flow_info ()
if (e->src != bb)
{
error ("verify_flow_info: Basic block %d succ edge is corrupted",
- bb->sindex);
+ bb->index);
fprintf (stderr, "Predecessor: ");
dump_edge_info (stderr, e, 0);
fprintf (stderr, "\nSuccessor: ");
@@ -1888,13 +1884,13 @@ verify_flow_info ()
err = 1;
}
- edge_checksum[e->dest->sindex + 2] += (size_t) e;
+ edge_checksum[e->dest->index + 2] += (size_t) e;
}
if (n_eh && GET_CODE (PATTERN (bb->end)) != RESX
&& !find_reg_note (bb->end, REG_EH_REGION, NULL_RTX))
{
- error ("Missing REG_EH_REGION note in the end of bb %i", bb->sindex);
+ error ("Missing REG_EH_REGION note in the end of bb %i", bb->index);
err = 1;
}
if (n_branch
@@ -1902,28 +1898,28 @@ verify_flow_info ()
|| (n_branch > 1 && (any_uncondjump_p (bb->end)
|| any_condjump_p (bb->end)))))
{
- error ("Too many outgoing branch edges from bb %i", bb->sindex);
+ error ("Too many outgoing branch edges from bb %i", bb->index);
err = 1;
}
if (n_fallthru && any_uncondjump_p (bb->end))
{
- error ("Fallthru edge after unconditional jump %i", bb->sindex);
+ error ("Fallthru edge after unconditional jump %i", bb->index);
err = 1;
}
if (n_branch != 1 && any_uncondjump_p (bb->end))
{
- error ("Wrong amount of branch edges after unconditional jump %i", bb->sindex);
+ error ("Wrong amount of branch edges after unconditional jump %i", bb->index);
err = 1;
}
if (n_branch != 1 && any_condjump_p (bb->end)
- && JUMP_LABEL (bb->end) != bb->next_bb->head)
+ && JUMP_LABEL (bb->end) != BASIC_BLOCK (bb->index + 1)->head)
{
- error ("Wrong amount of branch edges after conditional jump %i", bb->sindex);
+ error ("Wrong amount of branch edges after conditional jump %i", bb->index);
err = 1;
}
if (n_call && GET_CODE (bb->end) != CALL_INSN)
{
- error ("Call edges for non-call insn in bb %i", bb->sindex);
+ error ("Call edges for non-call insn in bb %i", bb->index);
err = 1;
}
if (n_abnormal
@@ -1932,7 +1928,7 @@ verify_flow_info ()
|| any_condjump_p (bb->end)
|| any_uncondjump_p (bb->end)))
{
- error ("Abnormal edges for no purpose in bb %i", bb->sindex);
+ error ("Abnormal edges for no purpose in bb %i", bb->index);
err = 1;
}
@@ -1947,7 +1943,7 @@ verify_flow_info ()
|| (GET_CODE (insn) == NOTE
&& NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK))
{
- error ("missing barrier after block %i", bb->sindex);
+ error ("missing barrier after block %i", bb->index);
err = 1;
break;
}
@@ -1957,7 +1953,7 @@ verify_flow_info ()
{
if (e->dest != bb)
{
- error ("basic block %d pred edge is corrupted", bb->sindex);
+ error ("basic block %d pred edge is corrupted", bb->index);
fputs ("Predecessor: ", stderr);
dump_edge_info (stderr, e, 0);
fputs ("\nSuccessor: ", stderr);
@@ -1965,7 +1961,7 @@ verify_flow_info ()
fputc ('\n', stderr);
err = 1;
}
- edge_checksum[e->dest->sindex + 2] -= (size_t) e;
+ edge_checksum[e->dest->index + 2] -= (size_t) e;
}
for (x = bb->head; x != NEXT_INSN (bb->end); x = NEXT_INSN (x))
@@ -1975,11 +1971,11 @@ verify_flow_info ()
if (! BLOCK_FOR_INSN (x))
error
("insn %d inside basic block %d but block_for_insn is NULL",
- INSN_UID (x), bb->sindex);
+ INSN_UID (x), bb->index);
else
error
("insn %d inside basic block %d but block_for_insn is %i",
- INSN_UID (x), bb->sindex, BLOCK_FOR_INSN (x)->sindex);
+ INSN_UID (x), bb->index, BLOCK_FOR_INSN (x)->index);
err = 1;
}
@@ -1993,7 +1989,7 @@ verify_flow_info ()
if (bb->end == x)
{
error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
- bb->sindex);
+ bb->index);
err = 1;
}
@@ -2003,7 +1999,7 @@ verify_flow_info ()
if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
{
error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
- bb->sindex);
+ bb->index);
err = 1;
}
@@ -2016,7 +2012,7 @@ verify_flow_info ()
if (NOTE_INSN_BASIC_BLOCK_P (x))
{
error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
- INSN_UID (x), bb->sindex);
+ INSN_UID (x), bb->index);
err = 1;
}
@@ -2027,7 +2023,7 @@ verify_flow_info ()
|| GET_CODE (x) == CODE_LABEL
|| GET_CODE (x) == BARRIER)
{
- error ("in basic block %d:", bb->sindex);
+ error ("in basic block %d:", bb->index);
fatal_insn ("flow control insn inside a basic block", x);
}
}
@@ -2038,33 +2034,32 @@ verify_flow_info ()
edge e;
for (e = ENTRY_BLOCK_PTR->succ; e ; e = e->succ_next)
- edge_checksum[e->dest->sindex + 2] += (size_t) e;
+ edge_checksum[e->dest->index + 2] += (size_t) e;
for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
- edge_checksum[e->dest->sindex + 2] -= (size_t) e;
+ edge_checksum[e->dest->index + 2] -= (size_t) e;
}
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- if (edge_checksum[bb->sindex + 2])
+ for (i = -2; i < n_basic_blocks; ++i)
+ if (edge_checksum[i + 2])
{
- error ("basic block %i edge lists are corrupted", bb->sindex);
+ error ("basic block %i edge lists are corrupted", i);
err = 1;
}
+ last_bb_num_seen = -1;
num_bb_notes = 0;
- last_bb_seen = ENTRY_BLOCK_PTR;
-
for (x = rtx_first; x; x = NEXT_INSN (x))
{
if (NOTE_INSN_BASIC_BLOCK_P (x))
{
- bb = NOTE_BASIC_BLOCK (x);
+ basic_block bb = NOTE_BASIC_BLOCK (x);
num_bb_notes++;
- if (bb != last_bb_seen->next_bb)
+ if (bb->index != last_bb_num_seen + 1)
internal_error ("basic blocks not numbered consecutively");
- last_bb_seen = bb;
+ last_bb_num_seen = bb->index;
}
if (!bb_info[INSN_UID (x)])
@@ -2098,10 +2093,10 @@ verify_flow_info ()
fatal_insn ("return not followed by barrier", x);
}
- if (num_bb_notes != num_basic_blocks)
+ if (num_bb_notes != n_basic_blocks)
internal_error
- ("number of bb notes in insn chain (%d) != num_basic_blocks (%d)",
- num_bb_notes, num_basic_blocks);
+ ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
+ num_bb_notes, n_basic_blocks);
if (err)
internal_error ("verify_flow_info failed");
@@ -2220,7 +2215,7 @@ purge_dead_edges (bb)
return purged;
if (rtl_dump_file)
- fprintf (rtl_dump_file, "Purged edges from bb %i\n", bb->sindex);
+ fprintf (rtl_dump_file, "Purged edges from bb %i\n", bb->index);
if (!optimize)
return purged;
@@ -2279,7 +2274,7 @@ purge_dead_edges (bb)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Purged non-fallthru edges from bb %i\n",
- bb->sindex);
+ bb->index);
return purged;
}
@@ -2290,23 +2285,22 @@ bool
purge_all_dead_edges (update_life_p)
int update_life_p;
{
- int purged = false;
+ int i, purged = false;
sbitmap blocks = 0;
- basic_block bb;
if (update_life_p)
{
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (blocks);
}
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
- bool purged_here = purge_dead_edges (bb);
+ bool purged_here = purge_dead_edges (BASIC_BLOCK (i));
purged |= purged_here;
if (purged_here && update_life_p)
- SET_BIT (blocks, bb->sindex);
+ SET_BIT (blocks, i);
}
if (update_life_p && purged)
diff --git a/gcc/combine.c b/gcc/combine.c
index aaf1bcf..7a5604f 100644
--- a/gcc/combine.c
+++ b/gcc/combine.c
@@ -192,8 +192,8 @@ static HARD_REG_SET newpat_used_regs;
static rtx added_links_insn;
-/* Basic block which we are performing combines. */
-static basic_block this_basic_block;
+/* Basic block number of the block in which we are performing combines. */
+static int this_basic_block;
/* A bitmap indicating which blocks had registers go dead at entry.
After combine, we'll need to re-do global life analysis with
@@ -578,7 +578,7 @@ combine_instructions (f, nregs)
setup_incoming_promotions ();
- refresh_blocks = sbitmap_alloc (last_basic_block);
+ refresh_blocks = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (refresh_blocks);
need_refresh = 0;
@@ -610,138 +610,139 @@ combine_instructions (f, nregs)
/* Now scan all the insns in forward order. */
+ this_basic_block = -1;
label_tick = 1;
last_call_cuid = 0;
mem_last_set = 0;
init_reg_last_arrays ();
setup_incoming_promotions ();
- FOR_ALL_BB (this_basic_block)
+ for (insn = f; insn; insn = next ? next : NEXT_INSN (insn))
{
- for (insn = this_basic_block->head;
- insn != NEXT_INSN (this_basic_block->end);
- insn = next ? next : NEXT_INSN (insn))
+ next = 0;
+
+ /* If INSN starts a new basic block, update our basic block number. */
+ if (this_basic_block + 1 < n_basic_blocks
+ && BLOCK_HEAD (this_basic_block + 1) == insn)
+ this_basic_block++;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ label_tick++;
+
+ else if (INSN_P (insn))
{
- next = 0;
+ /* See if we know about function return values before this
+ insn based upon SUBREG flags. */
+ check_promoted_subreg (insn, PATTERN (insn));
- if (GET_CODE (insn) == CODE_LABEL)
- label_tick++;
+ /* Try this insn with each insn it links back to. */
- else if (INSN_P (insn))
- {
- /* See if we know about function return values before this
- insn based upon SUBREG flags. */
- check_promoted_subreg (insn, PATTERN (insn));
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ if ((next = try_combine (insn, XEXP (links, 0),
+ NULL_RTX, &new_direct_jump_p)) != 0)
+ goto retry;
- /* Try this insn with each insn it links back to. */
+ /* Try each sequence of three linked insns ending with this one. */
- for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
- if ((next = try_combine (insn, XEXP (links, 0),
- NULL_RTX, &new_direct_jump_p)) != 0)
- goto retry;
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ {
+ rtx link = XEXP (links, 0);
- /* Try each sequence of three linked insns ending with this one. */
+ /* If the linked insn has been replaced by a note, then there
+ is no point in pursuing this chain any further. */
+ if (GET_CODE (link) == NOTE)
+ continue;
- for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
- {
- rtx link = XEXP (links, 0);
-
- /* If the linked insn has been replaced by a note, then there
- is no point in pursuing this chain any further. */
- if (GET_CODE (link) == NOTE)
- continue;
-
- for (nextlinks = LOG_LINKS (link);
- nextlinks;
- nextlinks = XEXP (nextlinks, 1))
- if ((next = try_combine (insn, link,
- XEXP (nextlinks, 0),
- &new_direct_jump_p)) != 0)
- goto retry;
- }
+ for (nextlinks = LOG_LINKS (link);
+ nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, link,
+ XEXP (nextlinks, 0),
+ &new_direct_jump_p)) != 0)
+ goto retry;
+ }
#ifdef HAVE_cc0
- /* Try to combine a jump insn that uses CC0
- with a preceding insn that sets CC0, and maybe with its
- logical predecessor as well.
- This is how we make decrement-and-branch insns.
- We need this special code because data flow connections
- via CC0 do not get entered in LOG_LINKS. */
-
- if (GET_CODE (insn) == JUMP_INSN
- && (prev = prev_nonnote_insn (insn)) != 0
- && GET_CODE (prev) == INSN
- && sets_cc0_p (PATTERN (prev)))
- {
- if ((next = try_combine (insn, prev,
- NULL_RTX, &new_direct_jump_p)) != 0)
- goto retry;
-
- for (nextlinks = LOG_LINKS (prev); nextlinks;
- nextlinks = XEXP (nextlinks, 1))
- if ((next = try_combine (insn, prev,
- XEXP (nextlinks, 0),
- &new_direct_jump_p)) != 0)
- goto retry;
- }
+ /* Try to combine a jump insn that uses CC0
+ with a preceding insn that sets CC0, and maybe with its
+ logical predecessor as well.
+ This is how we make decrement-and-branch insns.
+ We need this special code because data flow connections
+ via CC0 do not get entered in LOG_LINKS. */
+
+ if (GET_CODE (insn) == JUMP_INSN
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev)))
+ {
+ if ((next = try_combine (insn, prev,
+ NULL_RTX, &new_direct_jump_p)) != 0)
+ goto retry;
+
+ for (nextlinks = LOG_LINKS (prev); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, prev,
+ XEXP (nextlinks, 0),
+ &new_direct_jump_p)) != 0)
+ goto retry;
+ }
- /* Do the same for an insn that explicitly references CC0. */
- if (GET_CODE (insn) == INSN
- && (prev = prev_nonnote_insn (insn)) != 0
- && GET_CODE (prev) == INSN
- && sets_cc0_p (PATTERN (prev))
- && GET_CODE (PATTERN (insn)) == SET
- && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
- {
- if ((next = try_combine (insn, prev,
- NULL_RTX, &new_direct_jump_p)) != 0)
- goto retry;
-
- for (nextlinks = LOG_LINKS (prev); nextlinks;
- nextlinks = XEXP (nextlinks, 1))
- if ((next = try_combine (insn, prev,
- XEXP (nextlinks, 0),
- &new_direct_jump_p)) != 0)
- goto retry;
- }
+ /* Do the same for an insn that explicitly references CC0. */
+ if (GET_CODE (insn) == INSN
+ && (prev = prev_nonnote_insn (insn)) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev))
+ && GET_CODE (PATTERN (insn)) == SET
+ && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
+ {
+ if ((next = try_combine (insn, prev,
+ NULL_RTX, &new_direct_jump_p)) != 0)
+ goto retry;
- /* Finally, see if any of the insns that this insn links to
- explicitly references CC0. If so, try this insn, that insn,
- and its predecessor if it sets CC0. */
- for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
- if (GET_CODE (XEXP (links, 0)) == INSN
- && GET_CODE (PATTERN (XEXP (links, 0))) == SET
- && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
- && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
- && GET_CODE (prev) == INSN
- && sets_cc0_p (PATTERN (prev))
- && (next = try_combine (insn, XEXP (links, 0),
- prev, &new_direct_jump_p)) != 0)
+ for (nextlinks = LOG_LINKS (prev); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, prev,
+ XEXP (nextlinks, 0),
+ &new_direct_jump_p)) != 0)
goto retry;
+ }
+
+ /* Finally, see if any of the insns that this insn links to
+ explicitly references CC0. If so, try this insn, that insn,
+ and its predecessor if it sets CC0. */
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ if (GET_CODE (XEXP (links, 0)) == INSN
+ && GET_CODE (PATTERN (XEXP (links, 0))) == SET
+ && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
+ && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
+ && GET_CODE (prev) == INSN
+ && sets_cc0_p (PATTERN (prev))
+ && (next = try_combine (insn, XEXP (links, 0),
+ prev, &new_direct_jump_p)) != 0)
+ goto retry;
#endif
- /* Try combining an insn with two different insns whose results it
- uses. */
- for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
- for (nextlinks = XEXP (links, 1); nextlinks;
- nextlinks = XEXP (nextlinks, 1))
- if ((next = try_combine (insn, XEXP (links, 0),
- XEXP (nextlinks, 0),
- &new_direct_jump_p)) != 0)
- goto retry;
+ /* Try combining an insn with two different insns whose results it
+ uses. */
+ for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
+ for (nextlinks = XEXP (links, 1); nextlinks;
+ nextlinks = XEXP (nextlinks, 1))
+ if ((next = try_combine (insn, XEXP (links, 0),
+ XEXP (nextlinks, 0),
+ &new_direct_jump_p)) != 0)
+ goto retry;
- if (GET_CODE (insn) != NOTE)
- record_dead_and_set_regs (insn);
+ if (GET_CODE (insn) != NOTE)
+ record_dead_and_set_regs (insn);
- retry:
- ;
- }
+ retry:
+ ;
}
}
clear_bb_flags ();
- EXECUTE_IF_SET_IN_SBITMAP (refresh_blocks, 0, i,
- BASIC_BLOCK (i)->flags |= BB_DIRTY);
+ EXECUTE_IF_SET_IN_SBITMAP (refresh_blocks, 0, this_basic_block,
+ BASIC_BLOCK (this_basic_block)->flags |= BB_DIRTY);
new_direct_jump_p |= purge_all_dead_edges (0);
delete_noop_moves (f);
@@ -859,7 +860,7 @@ set_nonzero_bits_and_sign_copies (x, set, data)
&& REGNO (x) >= FIRST_PSEUDO_REGISTER
/* If this register is undefined at the start of the file, we can't
say what its contents were. */
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, REGNO (x))
+ && ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start, REGNO (x))
&& GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
{
if (set == 0 || GET_CODE (set) == CLOBBER)
@@ -2389,8 +2390,8 @@ try_combine (i3, i2, i1, new_direct_jump_p)
which we know will be a NOTE. */
for (insn = NEXT_INSN (i3);
- insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
- || insn != this_basic_block->next_bb->head);
+ insn && (this_basic_block == n_basic_blocks - 1
+ || insn != BLOCK_HEAD (this_basic_block + 1));
insn = NEXT_INSN (insn))
{
if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
@@ -2607,8 +2608,8 @@ try_combine (i3, i2, i1, new_direct_jump_p)
&& ! find_reg_note (i2, REG_UNUSED,
SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
for (temp = NEXT_INSN (i2);
- temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
- || this_basic_block->head != temp);
+ temp && (this_basic_block == n_basic_blocks - 1
+ || BLOCK_HEAD (this_basic_block) != temp);
temp = NEXT_INSN (temp))
if (temp != i3 && INSN_P (temp))
for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
@@ -8069,7 +8070,7 @@ nonzero_bits (x, mode)
&& (reg_last_set_label[REGNO (x)] == label_tick
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start,
+ && ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
REGNO (x))))
&& INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
return reg_last_set_nonzero_bits[REGNO (x)] & nonzero;
@@ -8484,7 +8485,7 @@ num_sign_bit_copies (x, mode)
&& (reg_last_set_label[REGNO (x)] == label_tick
|| (REGNO (x) >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (REGNO (x)) == 1
- && ! REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start,
+ && ! REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start,
REGNO (x))))
&& INSN_CUID (reg_last_set[REGNO (x)]) < subst_low_cuid)
return reg_last_set_sign_bit_copies[REGNO (x)];
@@ -11493,7 +11494,7 @@ get_last_value_validate (loc, insn, tick, replace)
|| (! (regno >= FIRST_PSEUDO_REGISTER
&& REG_N_SETS (regno) == 1
&& (! REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno)))
+ (BASIC_BLOCK (0)->global_live_at_start, regno)))
&& reg_last_set_label[j] > tick))
{
if (replace)
@@ -11567,7 +11568,7 @@ get_last_value (x)
&& (regno < FIRST_PSEUDO_REGISTER
|| REG_N_SETS (regno) != 1
|| (REGNO_REG_SET_P
- (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno)))))
+ (BASIC_BLOCK (0)->global_live_at_start, regno)))))
return 0;
/* If the value was set in a later insn than the ones we are processing,
@@ -11686,7 +11687,7 @@ reg_dead_at_p (reg, insn)
rtx reg;
rtx insn;
{
- basic_block block;
+ int block;
unsigned int i;
/* Set variables for reg_dead_at_p_1. */
@@ -11721,19 +11722,19 @@ reg_dead_at_p (reg, insn)
/* Get the basic block number that we were in. */
if (insn == 0)
- block = ENTRY_BLOCK_PTR->next_bb;
+ block = 0;
else
{
- FOR_ALL_BB (block)
- if (insn == block->head)
+ for (block = 0; block < n_basic_blocks; block++)
+ if (insn == BLOCK_HEAD (block))
break;
- if (block == EXIT_BLOCK_PTR)
+ if (block == n_basic_blocks)
return 0;
}
for (i = reg_dead_regno; i < reg_dead_endregno; i++)
- if (REGNO_REG_SET_P (block->global_live_at_start, i))
+ if (REGNO_REG_SET_P (BASIC_BLOCK (block)->global_live_at_start, i))
return 0;
return 1;
@@ -12376,7 +12377,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
if (place == 0)
{
- basic_block bb = this_basic_block;
+ basic_block bb = BASIC_BLOCK (this_basic_block);
for (tem = PREV_INSN (i3); place == 0; tem = PREV_INSN (tem))
{
@@ -12520,7 +12521,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
&& REGNO_REG_SET_P (bb->global_live_at_start,
REGNO (XEXP (note, 0))))
{
- SET_BIT (refresh_blocks, this_basic_block->sindex);
+ SET_BIT (refresh_blocks, this_basic_block);
need_refresh = 1;
}
}
@@ -12540,7 +12541,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
after we remove them in delete_noop_moves. */
if (noop_move_p (place))
{
- SET_BIT (refresh_blocks, this_basic_block->sindex);
+ SET_BIT (refresh_blocks, this_basic_block);
need_refresh = 1;
}
@@ -12590,7 +12591,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
i += HARD_REGNO_NREGS (i, reg_raw_mode[i]))
{
rtx piece = gen_rtx_REG (reg_raw_mode[i], i);
- basic_block bb = this_basic_block;
+ basic_block bb = BASIC_BLOCK (this_basic_block);
if (! dead_or_set_p (place, piece)
&& ! reg_bitfield_target_p (piece,
@@ -12613,7 +12614,7 @@ distribute_notes (notes, from_insn, i3, i2, elim_i2, elim_i1)
if (tem == bb->head)
{
SET_BIT (refresh_blocks,
- this_basic_block->sindex);
+ this_basic_block);
need_refresh = 1;
break;
}
@@ -12718,8 +12719,8 @@ distribute_links (links)
since most links don't point very far away. */
for (insn = NEXT_INSN (XEXP (link, 0));
- (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
- || this_basic_block->next_bb->head != insn));
+ (insn && (this_basic_block == n_basic_blocks - 1
+ || BLOCK_HEAD (this_basic_block + 1) != insn));
insn = NEXT_INSN (insn))
if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
{
diff --git a/gcc/conflict.c b/gcc/conflict.c
index 765b4e4..d1fb129 100644
--- a/gcc/conflict.c
+++ b/gcc/conflict.c
@@ -447,18 +447,19 @@ conflict_graph_compute (regs, p)
regset regs;
partition p;
{
+ int b;
conflict_graph graph = conflict_graph_new (max_reg_num ());
regset_head live_head;
regset live = &live_head;
regset_head born_head;
regset born = &born_head;
- basic_block bb;
INIT_REG_SET (live);
INIT_REG_SET (born);
- FOR_ALL_BB_REVERSE (bb)
+ for (b = n_basic_blocks; --b >= 0; )
{
+ basic_block bb = BASIC_BLOCK (b);
rtx insn;
rtx head;
diff --git a/gcc/df.c b/gcc/df.c
index c598faf..be36feb 100644
--- a/gcc/df.c
+++ b/gcc/df.c
@@ -171,6 +171,12 @@ Perhaps there should be a bitmap argument to df_analyse to specify
#include "df.h"
#include "fibheap.h"
+#define FOR_ALL_BBS(BB, CODE) \
+do { \
+ int node_; \
+ for (node_ = 0; node_ < n_basic_blocks; node_++) \
+ {(BB) = BASIC_BLOCK (node_); CODE;};} while (0)
+
#define FOR_EACH_BB_IN_BITMAP(BITMAP, MIN, BB, CODE) \
do { \
unsigned int node_; \
@@ -400,8 +406,8 @@ df_bitmaps_alloc (df, flags)
struct df *df;
int flags;
{
+ unsigned int i;
int dflags = 0;
- basic_block bb;
/* Free the bitmaps if they need resizing. */
if ((flags & DF_LR) && df->n_regs < (unsigned int)max_reg_num ())
@@ -417,8 +423,9 @@ df_bitmaps_alloc (df, flags)
df->n_defs = df->def_id;
df->n_uses = df->use_id;
- FOR_ALL_BB (bb)
+ for (i = 0; i < df->n_bbs; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (flags & DF_RD && ! bb_info->rd_in)
@@ -467,10 +474,11 @@ df_bitmaps_free (df, flags)
struct df *df ATTRIBUTE_UNUSED;
int flags;
{
- basic_block bb;
+ unsigned int i;
- FOR_ALL_BB (bb)
+ for (i = 0; i < df->n_bbs; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (!bb_info)
@@ -526,7 +534,7 @@ df_alloc (df, n_regs)
int n_regs;
{
int n_insns;
- basic_block bb;
+ int i;
gcc_obstack_init (&df_ref_obstack);
@@ -547,7 +555,7 @@ df_alloc (df, n_regs)
df->uses = xmalloc (df->use_size * sizeof (*df->uses));
df->n_regs = n_regs;
- df->n_bbs = last_basic_block;
+ df->n_bbs = n_basic_blocks;
/* Allocate temporary working array used during local dataflow analysis. */
df->reg_def_last = xmalloc (df->n_regs * sizeof (struct ref *));
@@ -561,11 +569,11 @@ df_alloc (df, n_regs)
df->flags = 0;
- df->bbs = xcalloc (last_basic_block, sizeof (struct bb_info));
+ df->bbs = xcalloc (df->n_bbs, sizeof (struct bb_info));
df->all_blocks = BITMAP_XMALLOC ();
- FOR_ALL_BB (bb)
- bitmap_set_bit (df->all_blocks, bb->sindex);
+ for (i = 0; i < n_basic_blocks; i++)
+ bitmap_set_bit (df->all_blocks, i);
}
@@ -1938,10 +1946,8 @@ df_analyse_1 (df, blocks, flags, update)
int aflags;
int dflags;
int i;
- basic_block bb;
-
dflags = 0;
- aflags = flags;
+ aflags = flags;
if (flags & DF_UD_CHAIN)
aflags |= DF_RD | DF_RD_CHAIN;
@@ -2003,16 +2009,16 @@ df_analyse_1 (df, blocks, flags, update)
df_reg_use_chain_create (df, blocks);
}
- df->dfs_order = xmalloc (sizeof(int) * num_basic_blocks);
- df->rc_order = xmalloc (sizeof(int) * num_basic_blocks);
- df->rts_order = xmalloc (sizeof(int) * num_basic_blocks);
- df->inverse_dfs_map = xmalloc (sizeof(int) * last_basic_block);
- df->inverse_rc_map = xmalloc (sizeof(int) * last_basic_block);
- df->inverse_rts_map = xmalloc (sizeof(int) * last_basic_block);
-
+ df->dfs_order = xmalloc (sizeof(int) * n_basic_blocks);
+ df->rc_order = xmalloc (sizeof(int) * n_basic_blocks);
+ df->rts_order = xmalloc (sizeof(int) * n_basic_blocks);
+ df->inverse_dfs_map = xmalloc (sizeof(int) * n_basic_blocks);
+ df->inverse_rc_map = xmalloc (sizeof(int) * n_basic_blocks);
+ df->inverse_rts_map = xmalloc (sizeof(int) * n_basic_blocks);
+
flow_depth_first_order_compute (df->dfs_order, df->rc_order);
flow_reverse_top_sort_order_compute (df->rts_order);
- for (i = 0; i < num_basic_blocks; i ++)
+ for (i = 0; i < n_basic_blocks; i ++)
{
df->inverse_dfs_map[df->dfs_order[i]] = i;
df->inverse_rc_map[df->rc_order[i]] = i;
@@ -2023,16 +2029,17 @@ df_analyse_1 (df, blocks, flags, update)
/* Compute the sets of gens and kills for the defs of each bb. */
df_rd_local_compute (df, df->flags & DF_RD ? blocks : df->all_blocks);
{
- bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
- FOR_ALL_BB (bb)
+ int i;
+ bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *gen = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *kill = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ for (i = 0; i < n_basic_blocks; i ++)
{
- in[bb->sindex] = DF_BB_INFO (df, bb)->rd_in;
- out[bb->sindex] = DF_BB_INFO (df, bb)->rd_out;
- gen[bb->sindex] = DF_BB_INFO (df, bb)->rd_gen;
- kill[bb->sindex] = DF_BB_INFO (df, bb)->rd_kill;
+ in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_in;
+ out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_out;
+ gen[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_gen;
+ kill[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->rd_kill;
}
iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
FORWARD, UNION, df_rd_transfer_function,
@@ -2059,16 +2066,17 @@ df_analyse_1 (df, blocks, flags, update)
uses in each bb. */
df_ru_local_compute (df, df->flags & DF_RU ? blocks : df->all_blocks);
{
- bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *gen = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *kill = xmalloc (sizeof (bitmap) * last_basic_block);
- FOR_ALL_BB (bb)
+ int i;
+ bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *gen = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *kill = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ for (i = 0; i < n_basic_blocks; i ++)
{
- in[bb->sindex] = DF_BB_INFO (df, bb)->ru_in;
- out[bb->sindex] = DF_BB_INFO (df, bb)->ru_out;
- gen[bb->sindex] = DF_BB_INFO (df, bb)->ru_gen;
- kill[bb->sindex] = DF_BB_INFO (df, bb)->ru_kill;
+ in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_in;
+ out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_out;
+ gen[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_gen;
+ kill[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->ru_kill;
}
iterative_dataflow_bitmap (in, out, gen, kill, df->all_blocks,
BACKWARD, UNION, df_ru_transfer_function,
@@ -2098,16 +2106,17 @@ df_analyse_1 (df, blocks, flags, update)
/* Compute the sets of defs and uses of live variables. */
df_lr_local_compute (df, df->flags & DF_LR ? blocks : df->all_blocks);
{
- bitmap *in = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *out = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *use = xmalloc (sizeof (bitmap) * last_basic_block);
- bitmap *def = xmalloc (sizeof (bitmap) * last_basic_block);
- FOR_ALL_BB (bb)
+ int i;
+ bitmap *in = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *out = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *use = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ bitmap *def = xmalloc (sizeof (bitmap) * n_basic_blocks);
+ for (i = 0; i < n_basic_blocks; i ++)
{
- in[bb->sindex] = DF_BB_INFO (df, bb)->lr_in;
- out[bb->sindex] = DF_BB_INFO (df, bb)->lr_out;
- use[bb->sindex] = DF_BB_INFO (df, bb)->lr_use;
- def[bb->sindex] = DF_BB_INFO (df, bb)->lr_def;
+ in[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_in;
+ out[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_out;
+ use[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_use;
+ def[i] = DF_BB_INFO (df, BASIC_BLOCK (i))->lr_def;
}
iterative_dataflow_bitmap (in, out, use, def, df->all_blocks,
BACKWARD, UNION, df_lr_transfer_function,
@@ -2261,15 +2270,12 @@ df_modified_p (df, blocks)
struct df *df;
bitmap blocks;
{
+ unsigned int j;
int update = 0;
- basic_block bb;
-
- if (!df->n_bbs)
- return 0;
- FOR_ALL_BB (bb)
- if (bitmap_bit_p (df->bbs_modified, bb->sindex)
- && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, bb->sindex)))
+ for (j = 0; j < df->n_bbs; j++)
+ if (bitmap_bit_p (df->bbs_modified, j)
+ && (! blocks || (blocks == (bitmap) -1) || bitmap_bit_p (blocks, j)))
{
update = 1;
break;
@@ -2292,7 +2298,7 @@ df_analyse (df, blocks, flags)
/* We could deal with additional basic blocks being created by
rescanning everything again. */
- if (df->n_bbs && df->n_bbs != (unsigned int) last_basic_block)
+ if (df->n_bbs && df->n_bbs != (unsigned int)n_basic_blocks)
abort ();
update = df_modified_p (df, blocks);
@@ -2402,8 +2408,10 @@ df_refs_unlink (df, blocks)
}
else
{
- FOR_ALL_BB (bb)
+ FOR_ALL_BBS (bb,
+ {
df_bb_refs_unlink (df, bb);
+ });
}
}
#endif
@@ -2451,7 +2459,7 @@ df_insn_modify (df, bb, insn)
if (uid >= df->insn_size)
df_insn_table_realloc (df, 0);
- bitmap_set_bit (df->bbs_modified, bb->sindex);
+ bitmap_set_bit (df->bbs_modified, bb->index);
bitmap_set_bit (df->insns_modified, uid);
/* For incremental updating on the fly, perhaps we could make a copy
@@ -3266,6 +3274,7 @@ df_dump (df, flags, file)
int flags;
FILE *file;
{
+ unsigned int i;
unsigned int j;
if (! df || ! file)
@@ -3277,23 +3286,22 @@ df_dump (df, flags, file)
if (flags & DF_RD)
{
- basic_block bb;
-
fprintf (file, "Reaching defs:\n");
- FOR_ALL_BB (bb)
+ for (i = 0; i < df->n_bbs; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->rd_in)
continue;
- fprintf (file, "bb %d in \t", bb->sindex);
+ fprintf (file, "bb %d in \t", i);
dump_bitmap (file, bb_info->rd_in);
- fprintf (file, "bb %d gen \t", bb->sindex);
+ fprintf (file, "bb %d gen \t", i);
dump_bitmap (file, bb_info->rd_gen);
- fprintf (file, "bb %d kill\t", bb->sindex);
+ fprintf (file, "bb %d kill\t", i);
dump_bitmap (file, bb_info->rd_kill);
- fprintf (file, "bb %d out \t", bb->sindex);
+ fprintf (file, "bb %d out \t", i);
dump_bitmap (file, bb_info->rd_out);
}
}
@@ -3320,23 +3328,22 @@ df_dump (df, flags, file)
if (flags & DF_RU)
{
- basic_block bb;
-
fprintf (file, "Reaching uses:\n");
- FOR_ALL_BB (bb)
+ for (i = 0; i < df->n_bbs; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->ru_in)
continue;
- fprintf (file, "bb %d in \t", bb->sindex);
+ fprintf (file, "bb %d in \t", i);
dump_bitmap (file, bb_info->ru_in);
- fprintf (file, "bb %d gen \t", bb->sindex);
+ fprintf (file, "bb %d gen \t", i);
dump_bitmap (file, bb_info->ru_gen);
- fprintf (file, "bb %d kill\t", bb->sindex);
+ fprintf (file, "bb %d kill\t", i);
dump_bitmap (file, bb_info->ru_kill);
- fprintf (file, "bb %d out \t", bb->sindex);
+ fprintf (file, "bb %d out \t", i);
dump_bitmap (file, bb_info->ru_out);
}
}
@@ -3363,23 +3370,22 @@ df_dump (df, flags, file)
if (flags & DF_LR)
{
- basic_block bb;
-
fprintf (file, "Live regs:\n");
- FOR_ALL_BB (bb)
+ for (i = 0; i < df->n_bbs; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
struct bb_info *bb_info = DF_BB_INFO (df, bb);
if (! bb_info->lr_in)
continue;
- fprintf (file, "bb %d in \t", bb->sindex);
+ fprintf (file, "bb %d in \t", i);
dump_bitmap (file, bb_info->lr_in);
- fprintf (file, "bb %d use \t", bb->sindex);
+ fprintf (file, "bb %d use \t", i);
dump_bitmap (file, bb_info->lr_use);
- fprintf (file, "bb %d def \t", bb->sindex);
+ fprintf (file, "bb %d def \t", i);
dump_bitmap (file, bb_info->lr_def);
- fprintf (file, "bb %d out \t", bb->sindex);
+ fprintf (file, "bb %d out \t", i);
dump_bitmap (file, bb_info->lr_out);
}
}
@@ -3402,7 +3408,7 @@ df_dump (df, flags, file)
basic_block bb = df_regno_bb (df, j);
if (bb)
- fprintf (file, " bb %d", bb->sindex);
+ fprintf (file, " bb %d", bb->index);
else
fprintf (file, " bb ?");
}
@@ -3603,11 +3609,11 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
void *data;
{
int changed;
- int i = block->sindex;
+ int i = block->index;
edge e;
- basic_block bb = block;
- SET_BIT (visited, block->sindex);
- if (TEST_BIT (pending, block->sindex))
+ basic_block bb= block;
+ SET_BIT (visited, block->index);
+ if (TEST_BIT (pending, block->index))
{
if (dir == FORWARD)
{
@@ -3620,10 +3626,10 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
switch (conf_op)
{
case UNION:
- bitmap_a_or_b (in[i], in[i], out[e->src->sindex]);
+ bitmap_a_or_b (in[i], in[i], out[e->src->index]);
break;
case INTERSECTION:
- bitmap_a_and_b (in[i], in[i], out[e->src->sindex]);
+ bitmap_a_and_b (in[i], in[i], out[e->src->index]);
break;
}
}
@@ -3639,10 +3645,10 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
switch (conf_op)
{
case UNION:
- bitmap_a_or_b (out[i], out[i], in[e->dest->sindex]);
+ bitmap_a_or_b (out[i], out[i], in[e->dest->index]);
break;
case INTERSECTION:
- bitmap_a_and_b (out[i], out[i], in[e->dest->sindex]);
+ bitmap_a_and_b (out[i], out[i], in[e->dest->index]);
break;
}
}
@@ -3656,18 +3662,18 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
- if (e->dest == EXIT_BLOCK_PTR || e->dest == block)
+ if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
continue;
- SET_BIT (pending, e->dest->sindex);
+ SET_BIT (pending, e->dest->index);
}
}
else
{
for (e = bb->pred; e != 0; e = e->pred_next)
{
- if (e->src == ENTRY_BLOCK_PTR || e->dest == block)
+ if (e->src == ENTRY_BLOCK_PTR || e->dest->index == i)
continue;
- SET_BIT (pending, e->src->sindex);
+ SET_BIT (pending, e->src->index);
}
}
}
@@ -3676,11 +3682,11 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
- if (e->dest == EXIT_BLOCK_PTR || e->dest == block)
+ if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
continue;
- if (!TEST_BIT (visited, e->dest->sindex))
- hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
- conf_op, transfun, visited, pending,
+ if (!TEST_BIT (visited, e->dest->index))
+ hybrid_search_bitmap (e->dest, in, out, gen, kill, dir,
+ conf_op, transfun, visited, pending,
data);
}
}
@@ -3688,9 +3694,9 @@ hybrid_search_bitmap (block, in, out, gen, kill, dir,
{
for (e = bb->pred; e != 0; e = e->pred_next)
{
- if (e->src == ENTRY_BLOCK_PTR || e->src == block)
+ if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
continue;
- if (!TEST_BIT (visited, e->src->sindex))
+ if (!TEST_BIT (visited, e->src->index))
hybrid_search_bitmap (e->src, in, out, gen, kill, dir,
conf_op, transfun, visited, pending,
data);
@@ -3714,11 +3720,11 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
void *data;
{
int changed;
- int i = block->sindex;
+ int i = block->index;
edge e;
- basic_block bb = block;
- SET_BIT (visited, block->sindex);
- if (TEST_BIT (pending, block->sindex))
+ basic_block bb= block;
+ SET_BIT (visited, block->index);
+ if (TEST_BIT (pending, block->index))
{
if (dir == FORWARD)
{
@@ -3731,10 +3737,10 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
switch (conf_op)
{
case UNION:
- sbitmap_a_or_b (in[i], in[i], out[e->src->sindex]);
+ sbitmap_a_or_b (in[i], in[i], out[e->src->index]);
break;
case INTERSECTION:
- sbitmap_a_and_b (in[i], in[i], out[e->src->sindex]);
+ sbitmap_a_and_b (in[i], in[i], out[e->src->index]);
break;
}
}
@@ -3750,10 +3756,10 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
switch (conf_op)
{
case UNION:
- sbitmap_a_or_b (out[i], out[i], in[e->dest->sindex]);
+ sbitmap_a_or_b (out[i], out[i], in[e->dest->index]);
break;
case INTERSECTION:
- sbitmap_a_and_b (out[i], out[i], in[e->dest->sindex]);
+ sbitmap_a_and_b (out[i], out[i], in[e->dest->index]);
break;
}
}
@@ -3767,18 +3773,18 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
- if (e->dest == EXIT_BLOCK_PTR || e->dest == block)
+ if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
continue;
- SET_BIT (pending, e->dest->sindex);
+ SET_BIT (pending, e->dest->index);
}
}
else
{
for (e = bb->pred; e != 0; e = e->pred_next)
{
- if (e->src == ENTRY_BLOCK_PTR || e->dest == block)
+ if (e->src == ENTRY_BLOCK_PTR || e->dest->index == i)
continue;
- SET_BIT (pending, e->src->sindex);
+ SET_BIT (pending, e->src->index);
}
}
}
@@ -3787,9 +3793,9 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
{
for (e = bb->succ; e != 0; e = e->succ_next)
{
- if (e->dest == EXIT_BLOCK_PTR || e->dest == block)
+ if (e->dest == EXIT_BLOCK_PTR || e->dest->index == i)
continue;
- if (!TEST_BIT (visited, e->dest->sindex))
+ if (!TEST_BIT (visited, e->dest->index))
hybrid_search_sbitmap (e->dest, in, out, gen, kill, dir,
conf_op, transfun, visited, pending,
data);
@@ -3799,9 +3805,9 @@ hybrid_search_sbitmap (block, in, out, gen, kill, dir,
{
for (e = bb->pred; e != 0; e = e->pred_next)
{
- if (e->src == ENTRY_BLOCK_PTR || e->src == block)
+ if (e->src == ENTRY_BLOCK_PTR || e->src->index == i)
continue;
- if (!TEST_BIT (visited, e->src->sindex))
+ if (!TEST_BIT (visited, e->src->index))
hybrid_search_sbitmap (e->src, in, out, gen, kill, dir,
conf_op, transfun, visited, pending,
data);
@@ -3847,8 +3853,8 @@ iterative_dataflow_sbitmap (in, out, gen, kill, blocks,
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
- pending = sbitmap_alloc (last_basic_block);
- visited = sbitmap_alloc (last_basic_block);
+ pending = sbitmap_alloc (n_basic_blocks);
+ visited = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
@@ -3867,7 +3873,7 @@ iterative_dataflow_sbitmap (in, out, gen, kill, blocks,
{
i = (size_t) fibheap_extract_min (worklist);
bb = BASIC_BLOCK (i);
- if (!TEST_BIT (visited, bb->sindex))
+ if (!TEST_BIT (visited, bb->index))
hybrid_search_sbitmap (bb, in, out, gen, kill, dir,
conf_op, transfun, visited, pending, data);
}
@@ -3906,8 +3912,8 @@ iterative_dataflow_bitmap (in, out, gen, kill, blocks,
fibheap_t worklist;
basic_block bb;
sbitmap visited, pending;
- pending = sbitmap_alloc (last_basic_block);
- visited = sbitmap_alloc (last_basic_block);
+ pending = sbitmap_alloc (n_basic_blocks);
+ visited = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (pending);
sbitmap_zero (visited);
worklist = fibheap_new ();
@@ -3926,7 +3932,7 @@ iterative_dataflow_bitmap (in, out, gen, kill, blocks,
{
i = (size_t) fibheap_extract_min (worklist);
bb = BASIC_BLOCK (i);
- if (!TEST_BIT (visited, bb->sindex))
+ if (!TEST_BIT (visited, bb->index))
hybrid_search_bitmap (bb, in, out, gen, kill, dir,
conf_op, transfun, visited, pending, data);
}
diff --git a/gcc/df.h b/gcc/df.h
index 5d5b8b4..7f4e4be 100644
--- a/gcc/df.h
+++ b/gcc/df.h
@@ -158,7 +158,7 @@ struct df_map
};
-#define DF_BB_INFO(REFS, BB) (&REFS->bbs[(BB)->sindex])
+#define DF_BB_INFO(REFS, BB) (&REFS->bbs[(BB)->index])
/* Macros to access the elements within the ref structure. */
@@ -175,7 +175,7 @@ struct df_map
#define DF_REF_LOC(REF) ((REF)->loc)
#endif
#define DF_REF_BB(REF) (BLOCK_FOR_INSN ((REF)->insn))
-#define DF_REF_BBNO(REF) (BLOCK_FOR_INSN ((REF)->insn)->sindex)
+#define DF_REF_BBNO(REF) (BLOCK_FOR_INSN ((REF)->insn)->index)
#define DF_REF_INSN(REF) ((REF)->insn)
#define DF_REF_INSN_UID(REF) (INSN_UID ((REF)->insn))
#define DF_REF_TYPE(REF) ((REF)->type)
diff --git a/gcc/dominance.c b/gcc/dominance.c
index a5e3f0b..3b8abdb 100644
--- a/gcc/dominance.c
+++ b/gcc/dominance.c
@@ -45,7 +45,7 @@
number of the corresponding basic block. Please note, that we include the
artificial ENTRY_BLOCK (or EXIT_BLOCK in the post-dom case) in our lists to
support multiple entry points. As it has no real basic block index we use
- 'last_basic_block' for that. Its dfs number is of course 1. */
+ 'n_basic_blocks' for that. Its dfs number is of course 1. */
/* Type of Basic Block aka. TBB */
typedef unsigned int TBB;
@@ -140,9 +140,9 @@ static void
init_dom_info (di)
struct dom_info *di;
{
- /* We need memory for num_basic_blocks nodes and the ENTRY_BLOCK or
+ /* We need memory for n_basic_blocks nodes and the ENTRY_BLOCK or
EXIT_BLOCK. */
- unsigned int num = num_basic_blocks + 2;
+ unsigned int num = n_basic_blocks + 1 + 1;
init_ar (di->dfs_parent, TBB, num, 0);
init_ar (di->path_min, TBB, num, i);
init_ar (di->key, TBB, num, i);
@@ -155,7 +155,7 @@ init_dom_info (di)
init_ar (di->set_size, unsigned int, num, 1);
init_ar (di->set_child, TBB, num, 0);
- init_ar (di->dfs_order, TBB, (unsigned int) last_basic_block + 1, 0);
+ init_ar (di->dfs_order, TBB, (unsigned int) n_basic_blocks + 1, 0);
init_ar (di->dfs_to_bb, basic_block, num, 0);
di->dfsnum = 1;
@@ -207,7 +207,7 @@ calc_dfs_tree_nonrec (di, bb, reverse)
/* Ending block. */
basic_block ex_block;
- stack = (edge *) xmalloc ((num_basic_blocks + 3) * sizeof (edge));
+ stack = (edge *) xmalloc ((n_basic_blocks + 3) * sizeof (edge));
sp = 0;
/* Initialize our border blocks, and the first edge. */
@@ -244,7 +244,7 @@ calc_dfs_tree_nonrec (di, bb, reverse)
/* If the next node BN is either already visited or a border
block the current edge is useless, and simply overwritten
with the next edge out of the current node. */
- if (bn == ex_block || di->dfs_order[bn->sindex])
+ if (bn == ex_block || di->dfs_order[bn->index])
{
e = e->pred_next;
continue;
@@ -255,7 +255,7 @@ calc_dfs_tree_nonrec (di, bb, reverse)
else
{
bn = e->dest;
- if (bn == ex_block || di->dfs_order[bn->sindex])
+ if (bn == ex_block || di->dfs_order[bn->index])
{
e = e->succ_next;
continue;
@@ -269,10 +269,10 @@ calc_dfs_tree_nonrec (di, bb, reverse)
/* Fill the DFS tree info calculatable _before_ recursing. */
if (bb != en_block)
- my_i = di->dfs_order[bb->sindex];
+ my_i = di->dfs_order[bb->index];
else
- my_i = di->dfs_order[last_basic_block];
- child_i = di->dfs_order[bn->sindex] = di->dfsnum++;
+ my_i = di->dfs_order[n_basic_blocks];
+ child_i = di->dfs_order[bn->index] = di->dfsnum++;
di->dfs_to_bb[child_i] = bn;
di->dfs_parent[child_i] = my_i;
@@ -314,7 +314,7 @@ calc_dfs_tree (di, reverse)
{
/* The first block is the ENTRY_BLOCK (or EXIT_BLOCK if REVERSE). */
basic_block begin = reverse ? EXIT_BLOCK_PTR : ENTRY_BLOCK_PTR;
- di->dfs_order[last_basic_block] = di->dfsnum;
+ di->dfs_order[n_basic_blocks] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = begin;
di->dfsnum++;
@@ -326,12 +326,13 @@ calc_dfs_tree (di, reverse)
They are reverse-unreachable. In the dom-case we disallow such
nodes, but in post-dom we have to deal with them, so we simply
include them in the DFS tree which actually becomes a forest. */
- basic_block b;
- FOR_ALL_BB_REVERSE (b)
+ int i;
+ for (i = n_basic_blocks - 1; i >= 0; i--)
{
- if (di->dfs_order[b->sindex])
+ basic_block b = BASIC_BLOCK (i);
+ if (di->dfs_order[b->index])
continue;
- di->dfs_order[b->sindex] = di->dfsnum;
+ di->dfs_order[b->index] = di->dfsnum;
di->dfs_to_bb[di->dfsnum] = b;
di->dfsnum++;
calc_dfs_tree_nonrec (di, b, reverse);
@@ -341,7 +342,7 @@ calc_dfs_tree (di, reverse)
di->nodes = di->dfsnum - 1;
/* This aborts e.g. when there is _no_ path from ENTRY to EXIT at all. */
- if (di->nodes != (unsigned int) num_basic_blocks + 1)
+ if (di->nodes != (unsigned int) n_basic_blocks + 1)
abort ();
}
@@ -493,9 +494,9 @@ calc_idoms (di, reverse)
e_next = e->pred_next;
}
if (b == en_block)
- k1 = di->dfs_order[last_basic_block];
+ k1 = di->dfs_order[n_basic_blocks];
else
- k1 = di->dfs_order[b->sindex];
+ k1 = di->dfs_order[b->index];
/* Call eval() only if really needed. If k1 is above V in DFS tree,
then we know, that eval(k1) == k1 and key[k1] == k1. */
@@ -541,20 +542,20 @@ idoms_to_doms (di, dominators)
{
TBB i, e_index;
int bb, bb_idom;
- sbitmap_vector_zero (dominators, last_basic_block);
+ sbitmap_vector_zero (dominators, n_basic_blocks);
/* We have to be careful, to not include the ENTRY_BLOCK or EXIT_BLOCK
in the list of (post)-doms, so remember that in e_index. */
- e_index = di->dfs_order[last_basic_block];
+ e_index = di->dfs_order[n_basic_blocks];
for (i = 1; i <= di->nodes; i++)
{
if (i == e_index)
continue;
- bb = di->dfs_to_bb[i]->sindex;
+ bb = di->dfs_to_bb[i]->index;
if (di->dom[i] && (di->dom[i] != e_index))
{
- bb_idom = di->dfs_to_bb[di->dom[i]]->sindex;
+ bb_idom = di->dfs_to_bb[di->dom[i]]->index;
sbitmap_copy (dominators[bb], dominators[bb_idom]);
}
else
@@ -576,8 +577,8 @@ idoms_to_doms (di, dominators)
}
/* The main entry point into this module. IDOM is an integer array with room
- for last_basic_block integers, DOMS is a preallocated sbitmap array having
- room for last_basic_block^2 bits, and POST is true if the caller wants to
+ for n_basic_blocks integers, DOMS is a preallocated sbitmap array having
+ room for n_basic_blocks^2 bits, and POST is true if the caller wants to
know post-dominators.
On return IDOM[i] will be the BB->index of the immediate (post) dominator
@@ -603,17 +604,17 @@ calculate_dominance_info (idom, doms, reverse)
if (idom)
{
- basic_block b;
-
- FOR_ALL_BB (b)
+ int i;
+ for (i = 0; i < n_basic_blocks; i++)
{
- TBB d = di.dom[di.dfs_order[b->sindex]];
+ basic_block b = BASIC_BLOCK (i);
+ TBB d = di.dom[di.dfs_order[b->index]];
/* The old code didn't modify array elements of nodes having only
itself as dominator (d==0) or only ENTRY_BLOCK (resp. EXIT_BLOCK)
(d==1). */
if (d > 1)
- idom[b->sindex] = di.dfs_to_bb[d]->sindex;
+ idom[i] = di.dfs_to_bb[d]->index;
}
}
if (doms)
diff --git a/gcc/final.c b/gcc/final.c
index 0f4464a..3e1b223 100644
--- a/gcc/final.c
+++ b/gcc/final.c
@@ -928,8 +928,8 @@ insn_current_reference_address (branch)
void
compute_alignments ()
{
+ int i;
int log, max_skip, max_log;
- basic_block bb;
if (label_align)
{
@@ -946,8 +946,9 @@ compute_alignments ()
if (! optimize || optimize_size)
return;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx label = bb->head;
int fallthru_frequency = 0, branch_frequency = 0, has_fallthru = 0;
edge e;
@@ -977,8 +978,8 @@ compute_alignments ()
if (!has_fallthru
&& (branch_frequency > BB_FREQ_MAX / 10
- || (bb->frequency > bb->prev_bb->frequency * 10
- && (bb->prev_bb->frequency
+ || (bb->frequency > BASIC_BLOCK (i - 1)->frequency * 10
+ && (BASIC_BLOCK (i - 1)->frequency
<= ENTRY_BLOCK_PTR->frequency / 2))))
{
log = JUMP_ALIGN (label);
@@ -2018,7 +2019,7 @@ final_scan_insn (insn, file, optimize, prescan, nopeepholes)
#endif
if (flag_debug_asm)
fprintf (asm_out_file, "\t%s basic block %d\n",
- ASM_COMMENT_START, NOTE_BASIC_BLOCK (insn)->sindex);
+ ASM_COMMENT_START, NOTE_BASIC_BLOCK (insn)->index);
break;
case NOTE_INSN_EH_REGION_BEG:
diff --git a/gcc/flow.c b/gcc/flow.c
index c78877d..8a9a9db 100644
--- a/gcc/flow.c
+++ b/gcc/flow.c
@@ -575,7 +575,7 @@ verify_local_live_at_start (new_live_at_start, bb)
{
fprintf (rtl_dump_file,
"live_at_start mismatch in bb %d, aborting\nNew:\n",
- bb->sindex);
+ bb->index);
debug_bitmap_file (rtl_dump_file, new_live_at_start);
fputs ("Old:\n", rtl_dump_file);
dump_bb (bb, rtl_dump_file);
@@ -656,7 +656,6 @@ update_life_info (blocks, extent, prop_flags)
for ( ; ; )
{
int changed = 0;
- basic_block bb;
calculate_global_regs_live (blocks, blocks,
prop_flags & (PROP_SCAN_DEAD_CODE
@@ -668,8 +667,9 @@ update_life_info (blocks, extent, prop_flags)
/* Removing dead code may allow the CFG to be simplified which
in turn may allow for further dead code detection / removal. */
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
+ basic_block bb = BASIC_BLOCK (i);
COPY_REG_SET (tmp, bb->global_live_at_end);
changed |= propagate_block (bb, tmp, NULL, NULL,
@@ -718,10 +718,10 @@ update_life_info (blocks, extent, prop_flags)
}
else
{
- basic_block bb;
-
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
+ basic_block bb = BASIC_BLOCK (i);
+
COPY_REG_SET (tmp, bb->global_live_at_end);
propagate_block (bb, tmp, NULL, NULL, stabilized_prop_flags);
@@ -775,16 +775,16 @@ update_life_info_in_dirty_blocks (extent, prop_flags)
enum update_life_extent extent;
int prop_flags;
{
- sbitmap update_life_blocks = sbitmap_alloc (last_basic_block);
+ sbitmap update_life_blocks = sbitmap_alloc (n_basic_blocks);
+ int block_num;
int n = 0;
- basic_block bb;
int retval = 0;
sbitmap_zero (update_life_blocks);
- FOR_ALL_BB (bb)
- if (bb->flags & BB_DIRTY)
+ for (block_num = 0; block_num < n_basic_blocks; block_num++)
+ if (BASIC_BLOCK (block_num)->flags & BB_DIRTY)
{
- SET_BIT (update_life_blocks, bb->sindex);
+ SET_BIT (update_life_blocks, block_num);
n++;
}
@@ -810,8 +810,7 @@ free_basic_block_vars (keep_head_end_p)
clear_edges ();
VARRAY_FREE (basic_block_info);
}
- num_basic_blocks = 0;
- last_basic_block = 0;
+ n_basic_blocks = 0;
ENTRY_BLOCK_PTR->aux = NULL;
ENTRY_BLOCK_PTR->global_live_at_end = NULL;
@@ -826,12 +825,14 @@ int
delete_noop_moves (f)
rtx f ATTRIBUTE_UNUSED;
{
+ int i;
rtx insn, next;
basic_block bb;
int nnoops = 0;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ bb = BASIC_BLOCK (i);
for (insn = bb->head; insn != NEXT_INSN (bb->end); insn = next)
{
next = NEXT_INSN (insn);
@@ -1078,7 +1079,7 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
sbitmap blocks_in, blocks_out;
int flags;
{
- basic_block *queue, *qhead, *qtail, *qend, bb;
+ basic_block *queue, *qhead, *qtail, *qend;
regset tmp, new_live_at_end, call_used;
regset_head tmp_head, call_used_head;
regset_head new_live_at_end_head;
@@ -1087,8 +1088,10 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
/* Some passes used to forget clear aux field of basic block causing
sick behaviour here. */
#ifdef ENABLE_CHECKING
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- if (bb->aux)
+ if (ENTRY_BLOCK_PTR->aux || EXIT_BLOCK_PTR->aux)
+ abort ();
+ for (i = 0; i < n_basic_blocks; i++)
+ if (BASIC_BLOCK (i)->aux)
abort ();
#endif
@@ -1104,28 +1107,31 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
/* Create a worklist. Allocate an extra slot for ENTRY_BLOCK, and one
because the `head == tail' style test for an empty queue doesn't
work with a full queue. */
- queue = (basic_block *) xmalloc ((num_basic_blocks + 2) * sizeof (*queue));
+ queue = (basic_block *) xmalloc ((n_basic_blocks + 2) * sizeof (*queue));
qtail = queue;
- qhead = qend = queue + num_basic_blocks + 2;
+ qhead = qend = queue + n_basic_blocks + 2;
/* Queue the blocks set in the initial mask. Do this in reverse block
number order so that we are more likely for the first round to do
useful work. We use AUX non-null to flag that the block is queued. */
if (blocks_in)
{
- FOR_ALL_BB (bb)
- if (TEST_BIT (blocks_in, bb->sindex))
- {
- *--qhead = bb;
- bb->aux = bb;
- }
- else
- bb->aux = NULL;
+ /* Clear out the garbage that might be hanging out in bb->aux. */
+ for (i = n_basic_blocks - 1; i >= 0; --i)
+ BASIC_BLOCK (i)->aux = NULL;
+
+ EXECUTE_IF_SET_IN_SBITMAP (blocks_in, 0, i,
+ {
+ basic_block bb = BASIC_BLOCK (i);
+ *--qhead = bb;
+ bb->aux = bb;
+ });
}
else
{
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; ++i)
{
+ basic_block bb = BASIC_BLOCK (i);
*--qhead = bb;
bb->aux = bb;
}
@@ -1301,7 +1307,7 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
/* Let our caller know that BB changed enough to require its
death notes updated. */
if (blocks_out)
- SET_BIT (blocks_out, bb->sindex);
+ SET_BIT (blocks_out, bb->index);
if (! rescan)
{
@@ -1357,15 +1363,16 @@ calculate_global_regs_live (blocks_in, blocks_out, flags)
{
EXECUTE_IF_SET_IN_SBITMAP (blocks_out, 0, i,
{
- bb = BASIC_BLOCK (i);
+ basic_block bb = BASIC_BLOCK (i);
FREE_REG_SET (bb->local_set);
FREE_REG_SET (bb->cond_local_set);
});
}
else
{
- FOR_ALL_BB (bb)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
+ basic_block bb = BASIC_BLOCK (i);
FREE_REG_SET (bb->local_set);
FREE_REG_SET (bb->cond_local_set);
}
@@ -1491,10 +1498,12 @@ initialize_uninitialized_subregs ()
void
allocate_bb_life_data ()
{
- basic_block bb;
+ int i;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
+
bb->global_live_at_start = OBSTACK_ALLOC_REG_SET (&flow_obstack);
bb->global_live_at_end = OBSTACK_ALLOC_REG_SET (&flow_obstack);
}
@@ -2333,14 +2342,14 @@ int
regno_uninitialized (regno)
unsigned int regno;
{
- if (num_basic_blocks == 0
+ if (n_basic_blocks == 0
|| (regno < FIRST_PSEUDO_REGISTER
&& (global_regs[regno]
|| fixed_regs[regno]
|| FUNCTION_ARG_REGNO_P (regno))))
return 0;
- return REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno);
+ return REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start, regno);
}
/* 1 if register REGNO was alive at a place where `setjmp' was called
@@ -2351,11 +2360,11 @@ int
regno_clobbered_at_setjmp (regno)
int regno;
{
- if (num_basic_blocks == 0)
+ if (n_basic_blocks == 0)
return 0;
return ((REG_N_SETS (regno) > 1
- || REGNO_REG_SET_P (ENTRY_BLOCK_PTR->next_bb->global_live_at_start, regno))
+ || REGNO_REG_SET_P (BASIC_BLOCK (0)->global_live_at_start, regno))
&& REGNO_REG_SET_P (regs_live_at_setjmp, regno));
}
@@ -2710,7 +2719,7 @@ mark_set_1 (pbi, code, reg, cond, insn, flags)
| PROP_DEATH_NOTES | PROP_AUTOINC))
{
rtx y;
- int blocknum = pbi->bb->sindex;
+ int blocknum = pbi->bb->index;
y = NULL_RTX;
if (flags & (PROP_LOG_LINKS | PROP_AUTOINC))
@@ -3567,7 +3576,7 @@ mark_used_reg (pbi, reg, cond, insn)
{
/* Keep track of which basic block each reg appears in. */
- int blocknum = pbi->bb->sindex;
+ int blocknum = pbi->bb->index;
if (REG_BASIC_BLOCK (regno_first) == REG_BLOCK_UNKNOWN)
REG_BASIC_BLOCK (regno_first) = blocknum;
else if (REG_BASIC_BLOCK (regno_first) != blocknum)
@@ -4237,16 +4246,18 @@ count_or_remove_death_notes (blocks, kill)
sbitmap blocks;
int kill;
{
- int count = 0;
- basic_block bb;
+ int i, count = 0;
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
+ basic_block bb;
rtx insn;
- if (blocks && ! TEST_BIT (blocks, bb->sindex))
+ if (blocks && ! TEST_BIT (blocks, i))
continue;
+ bb = BASIC_BLOCK (i);
+
for (insn = bb->head;; insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
diff --git a/gcc/function.c b/gcc/function.c
index 03e2081..5bd70a0 100644
--- a/gcc/function.c
+++ b/gcc/function.c
@@ -7817,7 +7817,7 @@ epilogue_done:
}
/* Find the last line number note in the first block. */
- for (insn = ENTRY_BLOCK_PTR->next_bb->end;
+ for (insn = BASIC_BLOCK (0)->end;
insn != prologue_end && insn;
insn = PREV_INSN (insn))
if (GET_CODE (insn) == NOTE && NOTE_LINE_NUMBER (insn) > 0)
diff --git a/gcc/gcse.c b/gcc/gcse.c
index e4e1770..9320053 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -541,7 +541,7 @@ static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
struct null_pointer_info
{
/* The basic block being processed. */
- basic_block current_block;
+ int current_block;
/* The first register to be handled in this pass. */
unsigned int min_reg;
/* One greater than the last register to be handled in this pass. */
@@ -740,9 +740,9 @@ gcse_main (f, file)
if (file)
dump_flow_info (file);
- orig_bb_count = num_basic_blocks;
+ orig_bb_count = n_basic_blocks;
/* Return if there's nothing to do. */
- if (num_basic_blocks <= 1)
+ if (n_basic_blocks <= 1)
return 0;
/* Trying to perform global optimizations on flow graphs which have
@@ -753,23 +753,23 @@ gcse_main (f, file)
as blocks. But we do not want to punish small functions which have
a couple switch statements. So we require a relatively large number
of basic blocks and the ratio of edges to blocks to be high. */
- if (num_basic_blocks > 1000 && n_edges / num_basic_blocks >= 20)
+ if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
{
if (warn_disabled_optimization)
warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
- num_basic_blocks, n_edges / num_basic_blocks);
+ n_basic_blocks, n_edges / n_basic_blocks);
return 0;
}
/* If allocating memory for the cprop bitmap would take up too much
storage it's better just to disable the optimization. */
- if ((num_basic_blocks
+ if ((n_basic_blocks
* SBITMAP_SET_SIZE (max_gcse_regno)
* sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
{
if (warn_disabled_optimization)
warning ("GCSE disabled: %d basic blocks and %d registers",
- num_basic_blocks, max_gcse_regno);
+ n_basic_blocks, max_gcse_regno);
return 0;
}
@@ -834,12 +834,12 @@ gcse_main (f, file)
{
free_modify_mem_tables ();
modify_mem_list
- = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
+ = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
canon_modify_mem_list
- = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
- memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
- memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
- orig_bb_count = num_basic_blocks;
+ = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
+ memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
+ memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
+ orig_bb_count = n_basic_blocks;
}
free_reg_set_mem ();
alloc_reg_set_mem (max_reg_num ());
@@ -894,7 +894,7 @@ gcse_main (f, file)
if (file)
{
fprintf (file, "GCSE of %s: %d basic blocks, ",
- current_function_name, num_basic_blocks);
+ current_function_name, n_basic_blocks);
fprintf (file, "%d pass%s, %d bytes\n\n",
pass, pass > 1 ? "es" : "", max_pass_bytes);
}
@@ -1019,14 +1019,14 @@ alloc_gcse_mem (f)
reg_set_bitmap = BITMAP_XMALLOC ();
/* Allocate vars to track sets of regs, memory per block. */
- reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
max_gcse_regno);
/* Allocate array to keep a list of insns which modify memory in each
basic block. */
- modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
- canon_modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
- memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
- memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
+ modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
+ canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
+ memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
+ memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
modify_mem_list_set = BITMAP_XMALLOC ();
canon_modify_mem_list_set = BITMAP_XMALLOC ();
}
@@ -1132,15 +1132,15 @@ compute_local_properties (transp, comp, antloc, setp)
if (transp)
{
if (setp)
- sbitmap_vector_zero (transp, last_basic_block);
+ sbitmap_vector_zero (transp, n_basic_blocks);
else
- sbitmap_vector_ones (transp, last_basic_block);
+ sbitmap_vector_ones (transp, n_basic_blocks);
}
if (comp)
- sbitmap_vector_zero (comp, last_basic_block);
+ sbitmap_vector_zero (comp, n_basic_blocks);
if (antloc)
- sbitmap_vector_zero (antloc, last_basic_block);
+ sbitmap_vector_zero (antloc, n_basic_blocks);
/* We use the same code for cprop, pre and hoisting. For cprop
we care about the set hash table, for pre and hoisting we
@@ -1292,13 +1292,13 @@ compute_sets (f)
struct reg_avail_info
{
- basic_block last_bb;
+ int last_bb;
int first_set;
int last_set;
};
static struct reg_avail_info *reg_avail_info;
-static basic_block current_bb;
+static int current_bb;
/* See whether X, the source of a set, is something we want to consider for
@@ -1385,7 +1385,7 @@ oprs_unchanged_p (x, insn, avail_p)
}
case MEM:
- if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
+ if (load_killed_in_block_p (BASIC_BLOCK (current_bb), INSN_CUID (insn),
x, avail_p))
return 0;
else
@@ -1499,7 +1499,7 @@ load_killed_in_block_p (bb, uid_limit, x, avail_p)
rtx x;
int avail_p;
{
- rtx list_entry = modify_mem_list[bb->sindex];
+ rtx list_entry = modify_mem_list[bb->index];
while (list_entry)
{
rtx setter;
@@ -2373,7 +2373,7 @@ record_last_reg_set_info (insn, regno)
{
info->last_bb = current_bb;
info->first_set = cuid;
- SET_BIT (reg_set_in_block[current_bb->sindex], regno);
+ SET_BIT (reg_set_in_block[current_bb], regno);
}
}
@@ -2493,7 +2493,7 @@ compute_hash_table (set_p)
registers are set in which blocks.
??? This isn't needed during const/copy propagation, but it's cheap to
compute. Later. */
- sbitmap_vector_zero (reg_set_in_block, last_basic_block);
+ sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
/* re-Cache any INSN_LIST nodes we have allocated. */
clear_modify_mem_tables ();
@@ -2502,9 +2502,9 @@ compute_hash_table (set_p)
gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
for (i = 0; i < max_gcse_regno; ++i)
- reg_avail_info[i].last_bb = NULL;
+ reg_avail_info[i].last_bb = NEVER_SET;
- FOR_ALL_BB (current_bb)
+ for (current_bb = 0; current_bb < n_basic_blocks; current_bb++)
{
rtx insn;
unsigned int regno;
@@ -2515,8 +2515,8 @@ compute_hash_table (set_p)
??? hard-reg reg_set_in_block computation
could be moved to compute_sets since they currently don't change. */
- for (insn = current_bb->head;
- insn && insn != NEXT_INSN (current_bb->end);
+ for (insn = BLOCK_HEAD (current_bb);
+ insn && insn != NEXT_INSN (BLOCK_END (current_bb));
insn = NEXT_INSN (insn))
{
if (! INSN_P (insn))
@@ -2544,8 +2544,8 @@ compute_hash_table (set_p)
/* The next pass builds the hash table. */
- for (insn = current_bb->head, in_libcall_block = 0;
- insn && insn != NEXT_INSN (current_bb->end);
+ for (insn = BLOCK_HEAD (current_bb), in_libcall_block = 0;
+ insn && insn != NEXT_INSN (BLOCK_END (current_bb));
insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
@@ -2938,16 +2938,16 @@ alloc_rd_mem (n_blocks, n_insns)
int n_blocks, n_insns;
{
rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_kill, last_basic_block);
+ sbitmap_vector_zero (rd_kill, n_basic_blocks);
rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_gen, last_basic_block);
+ sbitmap_vector_zero (rd_gen, n_basic_blocks);
reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (reaching_defs, last_basic_block);
+ sbitmap_vector_zero (reaching_defs, n_basic_blocks);
rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_out, last_basic_block);
+ sbitmap_vector_zero (rd_out, n_basic_blocks);
}
/* Free reaching def variables. */
@@ -2973,7 +2973,7 @@ handle_rd_kill_set (insn, regno, bb)
for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
- SET_BIT (rd_kill[bb->sindex], INSN_CUID (this_reg->insn));
+ SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
}
/* Compute the set of kill's for reaching definitions. */
@@ -2981,10 +2981,9 @@ handle_rd_kill_set (insn, regno, bb)
static void
compute_kill_rd ()
{
- int cuid;
+ int bb, cuid;
unsigned int regno;
int i;
- basic_block bb;
/* For each block
For each set bit in `gen' of the block (i.e each insn which
@@ -2994,9 +2993,9 @@ compute_kill_rd ()
For each setting of regx in the linked list, which is not in
this block
Set the bit in `kill' corresponding to that insn. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
for (cuid = 0; cuid < max_cuid; cuid++)
- if (TEST_BIT (rd_gen[bb->sindex], cuid))
+ if (TEST_BIT (rd_gen[bb], cuid))
{
rtx insn = CUID_INSN (cuid);
rtx pat = PATTERN (insn);
@@ -3005,7 +3004,7 @@ compute_kill_rd ()
{
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
- handle_rd_kill_set (insn, regno, bb);
+ handle_rd_kill_set (insn, regno, BASIC_BLOCK (bb));
}
if (GET_CODE (pat) == PARALLEL)
@@ -3018,13 +3017,13 @@ compute_kill_rd ()
&& GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
handle_rd_kill_set (insn,
REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
- bb);
+ BASIC_BLOCK (bb));
}
}
else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
/* Each setting of this register outside of this block
must be marked in the set of kills in this block. */
- handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
+ handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), BASIC_BLOCK (bb));
}
}
@@ -3036,22 +3035,21 @@ compute_kill_rd ()
static void
compute_rd ()
{
- int changed, passes;
- basic_block bb;
+ int bb, changed, passes;
- FOR_ALL_BB (bb)
- sbitmap_copy (rd_out[bb->sindex] /*dst*/, rd_gen[bb->sindex] /*src*/);
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
passes = 0;
changed = 1;
while (changed)
{
changed = 0;
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- sbitmap_union_of_preds (reaching_defs[bb->sindex], rd_out, bb->sindex);
- changed |= sbitmap_union_of_diff_cg (rd_out[bb->sindex], rd_gen[bb->sindex],
- reaching_defs[bb->sindex], rd_kill[bb->sindex]);
+ sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
+ changed |= sbitmap_union_of_diff_cg (rd_out[bb], rd_gen[bb],
+ reaching_defs[bb], rd_kill[bb]);
}
passes++;
}
@@ -3069,16 +3067,16 @@ alloc_avail_expr_mem (n_blocks, n_exprs)
int n_blocks, n_exprs;
{
ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_kill, n_blocks);
+ sbitmap_vector_zero (ae_kill, n_basic_blocks);
ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_gen, n_blocks);
+ sbitmap_vector_zero (ae_gen, n_basic_blocks);
ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_in, n_blocks);
+ sbitmap_vector_zero (ae_in, n_basic_blocks);
ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_out, n_blocks);
+ sbitmap_vector_zero (ae_out, n_basic_blocks);
}
static void
@@ -3127,7 +3125,7 @@ expr_killed_p (x, bb)
switch (code)
{
case REG:
- return TEST_BIT (reg_set_in_block[bb->sindex], REGNO (x));
+ return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
case MEM:
if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
@@ -3178,20 +3176,20 @@ static void
compute_ae_kill (ae_gen, ae_kill)
sbitmap *ae_gen, *ae_kill;
{
- basic_block bb;
+ int bb;
unsigned int i;
struct expr *expr;
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
for (i = 0; i < expr_hash_table_size; i++)
for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
{
/* Skip EXPR if generated in this block. */
- if (TEST_BIT (ae_gen[bb->sindex], expr->bitmap_index))
+ if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
continue;
- if (expr_killed_p (expr->expr, bb))
- SET_BIT (ae_kill[bb->sindex], expr->bitmap_index);
+ if (expr_killed_p (expr->expr, BASIC_BLOCK (bb)))
+ SET_BIT (ae_kill[bb], expr->bitmap_index);
}
}
@@ -3227,40 +3225,40 @@ expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
{
basic_block pred_bb = pred->src;
- if (visited[pred_bb->sindex])
+ if (visited[pred_bb->index])
/* This predecessor has already been visited. Nothing to do. */
;
else if (pred_bb == bb)
{
/* BB loops on itself. */
if (check_self_loop
- && TEST_BIT (ae_gen[pred_bb->sindex], expr->bitmap_index)
- && BLOCK_NUM (occr->insn) == pred_bb->sindex)
+ && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
+ && BLOCK_NUM (occr->insn) == pred_bb->index)
return 1;
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
}
/* Ignore this predecessor if it kills the expression. */
- else if (TEST_BIT (ae_kill[pred_bb->sindex], expr->bitmap_index))
- visited[pred_bb->sindex] = 1;
+ else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
+ visited[pred_bb->index] = 1;
/* Does this predecessor generate this expression? */
- else if (TEST_BIT (ae_gen[pred_bb->sindex], expr->bitmap_index))
+ else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
{
/* Is this the occurrence we're looking for?
Note that there's only one generating occurrence per block
so we just need to check the block number. */
- if (BLOCK_NUM (occr->insn) == pred_bb->sindex)
+ if (BLOCK_NUM (occr->insn) == pred_bb->index)
return 1;
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
}
/* Neither gen nor kill. */
else
{
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
visited))
@@ -3283,7 +3281,7 @@ expr_reaches_here_p (occr, expr, bb, check_self_loop)
int check_self_loop;
{
int rval;
- char *visited = (char *) xcalloc (last_basic_block, 1);
+ char *visited = (char *) xcalloc (n_basic_blocks, 1);
rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
@@ -3607,21 +3605,20 @@ handle_avail_expr (insn, expr)
static int
classic_gcse ()
{
- int changed;
+ int bb, changed;
rtx insn;
- basic_block bb;
/* Note we start at block 1. */
changed = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
+ for (bb = 1; bb < n_basic_blocks; bb++)
{
/* Reset tables used to keep track of what's still valid [since the
start of the block]. */
reset_opr_set_tables ();
- for (insn = bb->head;
- insn != NULL && insn != NEXT_INSN (bb->end);
+ for (insn = BLOCK_HEAD (bb);
+ insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
insn = NEXT_INSN (insn))
{
/* Is insn of form (set (pseudo-reg) ...)? */
@@ -3639,7 +3636,7 @@ classic_gcse ()
&& ((expr = lookup_expr (src)) != NULL)
/* Is the expression available [at the start of the
block]? */
- && TEST_BIT (ae_in[bb->sindex], expr->bitmap_index)
+ && TEST_BIT (ae_in[bb], expr->bitmap_index)
/* Are the operands unchanged since the start of the
block? */
&& oprs_not_set_p (src, insn))
@@ -3670,7 +3667,7 @@ one_classic_gcse_pass (pass)
gcse_create_count = 0;
alloc_expr_hash_table (max_cuid);
- alloc_rd_mem (last_basic_block, max_cuid);
+ alloc_rd_mem (n_basic_blocks, max_cuid);
compute_expr_hash_table ();
if (gcse_file)
dump_hash_table (gcse_file, "Expression", expr_hash_table,
@@ -3680,7 +3677,7 @@ one_classic_gcse_pass (pass)
{
compute_kill_rd ();
compute_rd ();
- alloc_avail_expr_mem (last_basic_block, n_exprs);
+ alloc_avail_expr_mem (n_basic_blocks, n_exprs);
compute_ae_gen ();
compute_ae_kill (ae_gen, ae_kill);
compute_available (ae_gen, ae_kill, ae_out, ae_in);
@@ -3750,8 +3747,7 @@ compute_transp (x, indx, bmap, set_p)
sbitmap *bmap;
int set_p;
{
- int i, j;
- basic_block bb;
+ int bb, i, j;
enum rtx_code code;
reg_set *r;
const char *fmt;
@@ -3771,9 +3767,9 @@ compute_transp (x, indx, bmap, set_p)
{
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
{
- FOR_ALL_BB (bb)
- if (TEST_BIT (reg_set_in_block[bb->sindex], REGNO (x)))
- SET_BIT (bmap[bb->sindex], indx);
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
+ SET_BIT (bmap[bb], indx);
}
else
{
@@ -3785,9 +3781,9 @@ compute_transp (x, indx, bmap, set_p)
{
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
{
- FOR_ALL_BB (bb)
- if (TEST_BIT (reg_set_in_block[bb->sindex], REGNO (x)))
- RESET_BIT (bmap[bb->sindex], indx);
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
+ RESET_BIT (bmap[bb], indx);
}
else
{
@@ -3799,9 +3795,9 @@ compute_transp (x, indx, bmap, set_p)
return;
case MEM:
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- rtx list_entry = canon_modify_mem_list[bb->sindex];
+ rtx list_entry = canon_modify_mem_list[bb];
while (list_entry)
{
@@ -3810,9 +3806,9 @@ compute_transp (x, indx, bmap, set_p)
if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
{
if (set_p)
- SET_BIT (bmap[bb->sindex], indx);
+ SET_BIT (bmap[bb], indx);
else
- RESET_BIT (bmap[bb->sindex], indx);
+ RESET_BIT (bmap[bb], indx);
break;
}
/* LIST_ENTRY must be an INSN of some kind that sets memory.
@@ -3826,9 +3822,9 @@ compute_transp (x, indx, bmap, set_p)
x, rtx_addr_varies_p))
{
if (set_p)
- SET_BIT (bmap[bb->sindex], indx);
+ SET_BIT (bmap[bb], indx);
else
- RESET_BIT (bmap[bb->sindex], indx);
+ RESET_BIT (bmap[bb], indx);
break;
}
list_entry = XEXP (list_entry, 1);
@@ -4292,25 +4288,24 @@ static int
cprop (alter_jumps)
int alter_jumps;
{
- int changed;
- basic_block bb;
+ int bb, changed;
rtx insn;
/* Note we start at block 1. */
changed = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
+ for (bb = 1; bb < n_basic_blocks; bb++)
{
/* Reset tables used to keep track of what's still valid [since the
start of the block]. */
reset_opr_set_tables ();
- for (insn = bb->head;
- insn != NULL && insn != NEXT_INSN (bb->head);
+ for (insn = BLOCK_HEAD (bb);
+ insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
- changed |= cprop_insn (bb, insn, alter_jumps);
+ changed |= cprop_insn (BASIC_BLOCK (bb), insn, alter_jumps);
/* Keep track of everything modified by this insn. */
/* ??? Need to be careful w.r.t. mods done to INSN. Don't
@@ -4347,7 +4342,7 @@ one_cprop_pass (pass, alter_jumps)
n_sets);
if (n_sets > 0)
{
- alloc_cprop_mem (last_basic_block, n_sets);
+ alloc_cprop_mem (n_basic_blocks, n_sets);
compute_cprop_data ();
changed = cprop (alter_jumps);
free_cprop_mem ();
@@ -4457,11 +4452,11 @@ static void
compute_pre_data ()
{
sbitmap trapping_expr;
- basic_block bb;
+ int i;
unsigned int ui;
compute_local_properties (transp, comp, antloc, 0);
- sbitmap_vector_zero (ae_kill, last_basic_block);
+ sbitmap_vector_zero (ae_kill, n_basic_blocks);
/* Collect expressions which might trap. */
trapping_expr = sbitmap_alloc (n_exprs);
@@ -4480,7 +4475,7 @@ compute_pre_data ()
This is significantly faster than compute_ae_kill. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
edge e;
@@ -4488,16 +4483,16 @@ compute_pre_data ()
kill all trapping expressions because we won't be able to properly
place the instruction on the edge. So make them neither
anticipatable nor transparent. This is fairly conservative. */
- for (e = bb->pred; e ; e = e->pred_next)
+ for (e = BASIC_BLOCK (i)->pred; e ; e = e->pred_next)
if (e->flags & EDGE_ABNORMAL)
{
- sbitmap_difference (antloc[bb->sindex], antloc[bb->sindex], trapping_expr);
- sbitmap_difference (transp[bb->sindex], transp[bb->sindex], trapping_expr);
+ sbitmap_difference (antloc[i], antloc[i], trapping_expr);
+ sbitmap_difference (transp[i], transp[i], trapping_expr);
break;
}
- sbitmap_a_or_b (ae_kill[bb->sindex], transp[bb->sindex], comp[bb->sindex]);
- sbitmap_not (ae_kill[bb->sindex], ae_kill[bb->sindex]);
+ sbitmap_a_or_b (ae_kill[i], transp[i], comp[i]);
+ sbitmap_not (ae_kill[i], ae_kill[i]);
}
edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
@@ -4539,11 +4534,11 @@ pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
if (pred->src == ENTRY_BLOCK_PTR
/* Has predecessor has already been visited? */
- || visited[pred_bb->sindex])
+ || visited[pred_bb->index])
;/* Nothing to do. */
/* Does this predecessor generate this expression? */
- else if (TEST_BIT (comp[pred_bb->sindex], expr->bitmap_index))
+ else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
{
/* Is this the occurrence we're looking for?
Note that there's only one generating occurrence per block
@@ -4551,16 +4546,16 @@ pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
if (occr_bb == pred_bb)
return 1;
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
}
/* Ignore this predecessor if it kills the expression. */
- else if (! TEST_BIT (transp[pred_bb->sindex], expr->bitmap_index))
- visited[pred_bb->sindex] = 1;
+ else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
+ visited[pred_bb->index] = 1;
/* Neither gen nor kill. */
else
{
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
return 1;
}
@@ -4580,7 +4575,7 @@ pre_expr_reaches_here_p (occr_bb, expr, bb)
basic_block bb;
{
int rval;
- char *visited = (char *) xcalloc (last_basic_block, 1);
+ char *visited = (char *) xcalloc (n_basic_blocks, 1);
rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
@@ -4658,8 +4653,8 @@ insert_insn_end_bb (expr, bb, pre)
anywhere in the basic block with performing PRE optimizations.
Check this. */
if (GET_CODE (insn) == INSN && pre
- && !TEST_BIT (antloc[bb->sindex], expr->bitmap_index)
- && !TEST_BIT (transp[bb->sindex], expr->bitmap_index))
+ && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ && !TEST_BIT (transp[bb->index], expr->bitmap_index))
abort ();
/* If this is a jump table, then we can't insert stuff here. Since
@@ -4703,8 +4698,8 @@ insert_insn_end_bb (expr, bb, pre)
Check this. */
if (pre
- && !TEST_BIT (antloc[bb->sindex], expr->bitmap_index)
- && !TEST_BIT (transp[bb->sindex], expr->bitmap_index))
+ && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ && !TEST_BIT (transp[bb->index], expr->bitmap_index))
abort ();
/* Since different machines initialize their parameter registers
@@ -4756,7 +4751,7 @@ insert_insn_end_bb (expr, bb, pre)
if (gcse_file)
{
fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
- bb->sindex, INSN_UID (new_insn));
+ bb->index, INSN_UID (new_insn));
fprintf (gcse_file, "copying expression %d to reg %d\n",
expr->bitmap_index, regno);
}
@@ -4827,8 +4822,8 @@ pre_edge_insert (edge_list, index_map)
if (gcse_file)
{
fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
- bb->sindex,
- INDEX_EDGE_SUCC_BB (edge_list, e)->sindex);
+ bb->index,
+ INDEX_EDGE_SUCC_BB (edge_list, e)->index);
fprintf (gcse_file, "copy expression %d\n",
expr->bitmap_index);
}
@@ -4967,7 +4962,7 @@ pre_delete ()
rtx set;
basic_block bb = BLOCK_FOR_INSN (insn);
- if (TEST_BIT (pre_delete_map[bb->sindex], indx))
+ if (TEST_BIT (pre_delete_map[bb->index], indx))
{
set = single_set (insn);
if (! set)
@@ -5002,7 +4997,7 @@ pre_delete ()
"PRE: redundant insn %d (expression %d) in ",
INSN_UID (insn), indx);
fprintf (gcse_file, "bb %d, reaching reg is %d\n",
- bb->sindex, REGNO (expr->reaching_reg));
+ bb->index, REGNO (expr->reaching_reg));
}
}
}
@@ -5100,7 +5095,7 @@ one_pre_gcse_pass (pass)
if (n_exprs > 0)
{
- alloc_pre_mem (last_basic_block, n_exprs);
+ alloc_pre_mem (n_basic_blocks, n_exprs);
compute_pre_data ();
changed |= pre_gcse ();
free_edge_list (edge_list);
@@ -5184,18 +5179,18 @@ add_label_notes (x, insn)
static void
compute_transpout ()
{
- basic_block bb;
+ int bb;
unsigned int i;
struct expr *expr;
- sbitmap_vector_ones (transpout, last_basic_block);
+ sbitmap_vector_ones (transpout, n_basic_blocks);
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; ++bb)
{
/* Note that flow inserted a nop a the end of basic blocks that
end in call instructions for reasons other than abnormal
control flow. */
- if (GET_CODE (bb->end) != CALL_INSN)
+ if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
continue;
for (i = 0; i < expr_hash_table_size; i++)
@@ -5209,7 +5204,7 @@ compute_transpout ()
/* ??? Optimally, we would use interprocedural alias
analysis to determine if this mem is actually killed
by this call. */
- RESET_BIT (transpout[bb->sindex], expr->bitmap_index);
+ RESET_BIT (transpout[bb], expr->bitmap_index);
}
}
}
@@ -5242,8 +5237,8 @@ invalidate_nonnull_info (x, setter, data)
regno = REGNO (x) - npi->min_reg;
- RESET_BIT (npi->nonnull_local[npi->current_block->sindex], regno);
- SET_BIT (npi->nonnull_killed[npi->current_block->sindex], regno);
+ RESET_BIT (npi->nonnull_local[npi->current_block], regno);
+ SET_BIT (npi->nonnull_killed[npi->current_block], regno);
}
/* Do null-pointer check elimination for the registers indicated in
@@ -5258,7 +5253,8 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
sbitmap *nonnull_avout;
struct null_pointer_info *npi;
{
- basic_block bb, current_block;
+ int bb;
+ int current_block;
sbitmap *nonnull_local = npi->nonnull_local;
sbitmap *nonnull_killed = npi->nonnull_killed;
@@ -5270,10 +5266,10 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
Note that a register can have both properties in a single block. That
indicates that it's killed, then later in the block a new value is
computed. */
- sbitmap_vector_zero (nonnull_local, last_basic_block);
- sbitmap_vector_zero (nonnull_killed, last_basic_block);
+ sbitmap_vector_zero (nonnull_local, n_basic_blocks);
+ sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
- FOR_ALL_BB (current_block)
+ for (current_block = 0; current_block < n_basic_blocks; current_block++)
{
rtx insn, stop_insn;
@@ -5282,8 +5278,8 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
/* Scan each insn in the basic block looking for memory references and
register sets. */
- stop_insn = NEXT_INSN (current_block->end);
- for (insn = current_block->head;
+ stop_insn = NEXT_INSN (BLOCK_END (current_block));
+ for (insn = BLOCK_HEAD (current_block);
insn != stop_insn;
insn = NEXT_INSN (insn))
{
@@ -5311,7 +5307,7 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
&& GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
&& REGNO (reg) >= npi->min_reg
&& REGNO (reg) < npi->max_reg)
- SET_BIT (nonnull_local[current_block->sindex],
+ SET_BIT (nonnull_local[current_block],
REGNO (reg) - npi->min_reg);
/* Now invalidate stuff clobbered by this insn. */
@@ -5324,7 +5320,7 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
&& GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
&& REGNO (reg) >= npi->min_reg
&& REGNO (reg) < npi->max_reg)
- SET_BIT (nonnull_local[current_block->sindex],
+ SET_BIT (nonnull_local[current_block],
REGNO (reg) - npi->min_reg);
}
}
@@ -5336,17 +5332,17 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
/* Now look at each bb and see if it ends with a compare of a value
against zero. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- rtx last_insn = bb->end;
+ rtx last_insn = BLOCK_END (bb);
rtx condition, earliest;
int compare_and_branch;
/* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
since BLOCK_REG[BB] is zero if this block did not end with a
comparison against zero, this condition works. */
- if (block_reg[bb->sindex] < npi->min_reg
- || block_reg[bb->sindex] >= npi->max_reg)
+ if (block_reg[bb] < npi->min_reg
+ || block_reg[bb] >= npi->max_reg)
continue;
/* LAST_INSN is a conditional jump. Get its condition. */
@@ -5357,7 +5353,7 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
continue;
/* Is the register known to have a nonzero value? */
- if (!TEST_BIT (nonnull_avout[bb->sindex], block_reg[bb->sindex] - npi->min_reg))
+ if (!TEST_BIT (nonnull_avout[bb], block_reg[bb] - npi->min_reg))
continue;
/* Try to compute whether the compare/branch at the loop end is one or
@@ -5385,12 +5381,12 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
delete_insn (last_insn);
if (compare_and_branch == 2)
delete_insn (earliest);
- purge_dead_edges (bb);
+ purge_dead_edges (BASIC_BLOCK (bb));
/* Don't check this block again. (Note that BLOCK_END is
invalid here; we deleted the last instruction in the
block.) */
- block_reg[bb->sindex] = 0;
+ block_reg[bb] = 0;
}
}
@@ -5424,14 +5420,14 @@ delete_null_pointer_checks (f)
{
sbitmap *nonnull_avin, *nonnull_avout;
unsigned int *block_reg;
- basic_block bb;
+ int bb;
int reg;
int regs_per_pass;
int max_reg;
struct null_pointer_info npi;
/* If we have only a single block, then there's nothing to do. */
- if (num_basic_blocks <= 1)
+ if (n_basic_blocks <= 1)
return;
/* Trying to perform global optimizations on flow graphs which have
@@ -5442,27 +5438,27 @@ delete_null_pointer_checks (f)
as blocks. But we do not want to punish small functions which have
a couple switch statements. So we require a relatively large number
of basic blocks and the ratio of edges to blocks to be high. */
- if (num_basic_blocks > 1000 && n_edges / num_basic_blocks >= 20)
+ if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
return;
/* We need four bitmaps, each with a bit for each register in each
basic block. */
max_reg = max_reg_num ();
- regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
+ regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
/* Allocate bitmaps to hold local and global properties. */
- npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
- npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
- nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
- nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
+ npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
+ npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
+ nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
+ nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
/* Go through the basic blocks, seeing whether or not each block
ends with a conditional branch whose condition is a comparison
against zero. Record the register compared in BLOCK_REG. */
- block_reg = (unsigned int *) xcalloc (last_basic_block, sizeof (int));
- FOR_ALL_BB (bb)
+ block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- rtx last_insn = bb->end;
+ rtx last_insn = BLOCK_END (bb);
rtx condition, earliest, reg;
/* We only want conditional branches. */
@@ -5488,7 +5484,7 @@ delete_null_pointer_checks (f)
if (GET_CODE (reg) != REG)
continue;
- block_reg[bb->sindex] = REGNO (reg);
+ block_reg[bb] = REGNO (reg);
}
/* Go through the algorithm for each block of registers. */
@@ -5572,11 +5568,10 @@ free_code_hoist_mem ()
static void
compute_code_hoist_vbeinout ()
{
- int changed, passes;
- basic_block bb;
+ int bb, changed, passes;
- sbitmap_vector_zero (hoist_vbeout, last_basic_block);
- sbitmap_vector_zero (hoist_vbein, last_basic_block);
+ sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
+ sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
passes = 0;
changed = 1;
@@ -5587,12 +5582,12 @@ compute_code_hoist_vbeinout ()
/* We scan the blocks in the reverse order to speed up
the convergence. */
- FOR_ALL_BB_REVERSE (bb)
+ for (bb = n_basic_blocks - 1; bb >= 0; bb--)
{
- changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->sindex], antloc[bb->sindex],
- hoist_vbeout[bb->sindex], transp[bb->sindex]);
- if (bb->next_bb != EXIT_BLOCK_PTR)
- sbitmap_intersection_of_succs (hoist_vbeout[bb->sindex], hoist_vbein, bb->sindex);
+ changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb], antloc[bb],
+ hoist_vbeout[bb], transp[bb]);
+ if (bb != n_basic_blocks - 1)
+ sbitmap_intersection_of_succs (hoist_vbeout[bb], hoist_vbein, bb);
}
passes++;
@@ -5642,7 +5637,7 @@ hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
if (visited == NULL)
{
visited_allocated_locally = 1;
- visited = xcalloc (last_basic_block, 1);
+ visited = xcalloc (n_basic_blocks, 1);
}
for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
@@ -5651,19 +5646,19 @@ hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
if (pred->src == ENTRY_BLOCK_PTR)
break;
- else if (visited[pred_bb->sindex])
+ else if (visited[pred_bb->index])
continue;
/* Does this predecessor generate this expression? */
- else if (TEST_BIT (comp[pred_bb->sindex], expr_index))
+ else if (TEST_BIT (comp[pred_bb->index], expr_index))
break;
- else if (! TEST_BIT (transp[pred_bb->sindex], expr_index))
+ else if (! TEST_BIT (transp[pred_bb->index], expr_index))
break;
/* Not killed. */
else
{
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
pred_bb, visited))
break;
@@ -5680,12 +5675,12 @@ hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
static void
hoist_code ()
{
- basic_block bb, dominated;
+ int bb, dominated;
unsigned int i;
struct expr **index_map;
struct expr *expr;
- sbitmap_vector_zero (hoist_exprs, last_basic_block);
+ sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
/* Compute a mapping from expression number (`bitmap_index') to
hash table entry. */
@@ -5697,34 +5692,33 @@ hoist_code ()
/* Walk over each basic block looking for potentially hoistable
expressions, nothing gets hoisted from the entry block. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
int found = 0;
int insn_inserted_p;
/* Examine each expression that is very busy at the exit of this
block. These are the potentially hoistable expressions. */
- for (i = 0; i < hoist_vbeout[bb->sindex]->n_bits; i++)
+ for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
{
int hoistable = 0;
- if (TEST_BIT (hoist_vbeout[bb->sindex], i)
- && TEST_BIT (transpout[bb->sindex], i))
+ if (TEST_BIT (hoist_vbeout[bb], i) && TEST_BIT (transpout[bb], i))
{
/* We've found a potentially hoistable expression, now
we look at every block BB dominates to see if it
computes the expression. */
- FOR_ALL_BB (dominated)
+ for (dominated = 0; dominated < n_basic_blocks; dominated++)
{
/* Ignore self dominance. */
if (bb == dominated
- || ! TEST_BIT (dominators[dominated->sindex], bb->sindex))
+ || ! TEST_BIT (dominators[dominated], bb))
continue;
/* We've found a dominated block, now see if it computes
the busy expression and whether or not moving that
expression to the "beginning" of that block is safe. */
- if (!TEST_BIT (antloc[dominated->sindex], i))
+ if (!TEST_BIT (antloc[dominated], i))
continue;
/* Note if the expression would reach the dominated block
@@ -5732,7 +5726,8 @@ hoist_code ()
Keep track of how many times this expression is hoistable
from a dominated block into BB. */
- if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
+ if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
+ BASIC_BLOCK (dominated), NULL))
hoistable++;
}
@@ -5748,7 +5743,7 @@ hoist_code ()
to nullify any benefit we get from code hoisting. */
if (hoistable > 1)
{
- SET_BIT (hoist_exprs[bb->sindex], i);
+ SET_BIT (hoist_exprs[bb], i);
found = 1;
}
}
@@ -5759,29 +5754,29 @@ hoist_code ()
continue;
/* Loop over all the hoistable expressions. */
- for (i = 0; i < hoist_exprs[bb->sindex]->n_bits; i++)
+ for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
{
/* We want to insert the expression into BB only once, so
note when we've inserted it. */
insn_inserted_p = 0;
/* These tests should be the same as the tests above. */
- if (TEST_BIT (hoist_vbeout[bb->sindex], i))
+ if (TEST_BIT (hoist_vbeout[bb], i))
{
/* We've found a potentially hoistable expression, now
we look at every block BB dominates to see if it
computes the expression. */
- FOR_ALL_BB (dominated)
+ for (dominated = 0; dominated < n_basic_blocks; dominated++)
{
/* Ignore self dominance. */
if (bb == dominated
- || ! TEST_BIT (dominators[dominated->sindex], bb->sindex))
+ || ! TEST_BIT (dominators[dominated], bb))
continue;
/* We've found a dominated block, now see if it computes
the busy expression and whether or not moving that
expression to the "beginning" of that block is safe. */
- if (!TEST_BIT (antloc[dominated->sindex], i))
+ if (!TEST_BIT (antloc[dominated], i))
continue;
/* The expression is computed in the dominated block and
@@ -5789,7 +5784,8 @@ hoist_code ()
dominated block. Now we have to determine if the
expression would reach the dominated block if it was
placed at the end of BB. */
- if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
+ if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
+ BASIC_BLOCK (dominated), NULL))
{
struct expr *expr = index_map[i];
struct occr *occr = expr->antic_occr;
@@ -5797,7 +5793,7 @@ hoist_code ()
rtx set;
/* Find the right occurrence of this expression. */
- while (BLOCK_FOR_INSN (occr->insn) != dominated && occr)
+ while (BLOCK_NUM (occr->insn) != dominated && occr)
occr = occr->next;
/* Should never happen. */
@@ -5831,7 +5827,8 @@ hoist_code ()
occr->deleted_p = 1;
if (!insn_inserted_p)
{
- insert_insn_end_bb (index_map[i], bb, 0);
+ insert_insn_end_bb (index_map[i],
+ BASIC_BLOCK (bb), 0);
insn_inserted_p = 1;
}
}
@@ -5861,7 +5858,7 @@ one_code_hoisting_pass ()
if (n_exprs > 0)
{
- alloc_code_hoist_mem (last_basic_block, n_exprs);
+ alloc_code_hoist_mem (n_basic_blocks, n_exprs);
compute_code_hoist_data ();
hoist_code ();
free_code_hoist_mem ();
@@ -6111,15 +6108,15 @@ static void
compute_ld_motion_mems ()
{
struct ls_expr * ptr;
- basic_block bb;
+ int bb;
rtx insn;
pre_ldst_mems = NULL;
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- for (insn = bb->head;
- insn && insn != NEXT_INSN (bb->end);
+ for (insn = BLOCK_HEAD (bb);
+ insn && insn != NEXT_INSN (BLOCK_END (bb));
insn = NEXT_INSN (insn))
{
if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
@@ -6337,7 +6334,7 @@ store_ops_ok (x, bb)
case REG:
/* If a reg has changed after us in this
block, the operand has been killed. */
- return TEST_BIT (reg_set_in_block[bb->sindex], REGNO (x));
+ return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
case MEM:
x = XEXP (x, 0);
@@ -6436,24 +6433,23 @@ find_moveable_store (insn)
static int
compute_store_table ()
{
- int ret;
- basic_block bb;
+ int bb, ret;
unsigned regno;
rtx insn, pat;
max_gcse_regno = max_reg_num ();
- reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
max_gcse_regno);
- sbitmap_vector_zero (reg_set_in_block, last_basic_block);
+ sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
pre_ldst_mems = 0;
/* Find all the stores we care about. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- regvec = & (reg_set_in_block[bb->sindex]);
- for (insn = bb->end;
- insn && insn != PREV_INSN (bb->end);
+ regvec = & (reg_set_in_block[bb]);
+ for (insn = BLOCK_END (bb);
+ insn && insn != PREV_INSN (BLOCK_HEAD (bb));
insn = PREV_INSN (insn))
{
/* Ignore anything that is not a normal insn. */
@@ -6472,7 +6468,7 @@ compute_store_table ()
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (clobbers_all
|| TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
- SET_BIT (reg_set_in_block[bb->sindex], regno);
+ SET_BIT (reg_set_in_block[bb], regno);
}
pat = PATTERN (insn);
@@ -6638,17 +6634,18 @@ store_killed_before (x, insn, bb)
static void
build_store_vectors ()
{
- basic_block bb, b;
+ basic_block bb;
+ int b;
rtx insn, st;
struct ls_expr * ptr;
/* Build the gen_vector. This is any store in the table which is not killed
by aliasing later in its block. */
- ae_gen = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
- sbitmap_vector_zero (ae_gen, last_basic_block);
+ ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
+ sbitmap_vector_zero (ae_gen, n_basic_blocks);
- st_antloc = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
- sbitmap_vector_zero (st_antloc, last_basic_block);
+ st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
+ sbitmap_vector_zero (st_antloc, n_basic_blocks);
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
{
@@ -6669,7 +6666,7 @@ build_store_vectors ()
the block), and replace it with this one). We'll copy the
old SRC expression to an unused register in case there
are any side effects. */
- if (TEST_BIT (ae_gen[bb->sindex], ptr->index))
+ if (TEST_BIT (ae_gen[bb->index], ptr->index))
{
/* Find previous store. */
rtx st;
@@ -6686,7 +6683,7 @@ build_store_vectors ()
continue;
}
}
- SET_BIT (ae_gen[bb->sindex], ptr->index);
+ SET_BIT (ae_gen[bb->index], ptr->index);
AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
AVAIL_STORE_LIST (ptr));
}
@@ -6703,16 +6700,16 @@ build_store_vectors ()
free_INSN_LIST_list (&store_list);
}
- ae_kill = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
- sbitmap_vector_zero (ae_kill, last_basic_block);
+ ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
+ sbitmap_vector_zero (ae_kill, n_basic_blocks);
- transp = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
- sbitmap_vector_zero (transp, last_basic_block);
+ transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
+ sbitmap_vector_zero (transp, n_basic_blocks);
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
- FOR_ALL_BB (b)
+ for (b = 0; b < n_basic_blocks; b++)
{
- if (store_killed_after (ptr->pattern, b->head, b))
+ if (store_killed_after (ptr->pattern, BLOCK_HEAD (b), BASIC_BLOCK (b)))
{
/* The anticipatable expression is not killed if it's gen'd. */
/*
@@ -6730,10 +6727,10 @@ build_store_vectors ()
If we always kill it in this case, we'll sometimes do
uneccessary work, but it shouldn't actually hurt anything.
if (!TEST_BIT (ae_gen[b], ptr->index)). */
- SET_BIT (ae_kill[b->sindex], ptr->index);
+ SET_BIT (ae_kill[b], ptr->index);
}
else
- SET_BIT (transp[b->sindex], ptr->index);
+ SET_BIT (transp[b], ptr->index);
}
/* Any block with no exits calls some non-returning function, so
@@ -6744,10 +6741,10 @@ build_store_vectors ()
{
fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
print_ldst_list (gcse_file);
- dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
- dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
- dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
- dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
+ dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, n_basic_blocks);
+ dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, n_basic_blocks);
+ dump_sbitmap_vector (gcse_file, "Transpt", "", transp, n_basic_blocks);
+ dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, n_basic_blocks);
}
}
@@ -6779,7 +6776,7 @@ insert_insn_start_bb (insn, bb)
if (gcse_file)
{
fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
- bb->sindex);
+ bb->index);
print_inline_rtx (gcse_file, insn, 6);
fprintf (gcse_file, "\n");
}
@@ -6845,7 +6842,7 @@ insert_store (expr, e)
if (gcse_file)
{
fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
print_inline_rtx (gcse_file, insn, 6);
fprintf (gcse_file, "\n");
}
@@ -6868,7 +6865,7 @@ replace_store_insn (reg, del, bb)
if (gcse_file)
{
fprintf (gcse_file,
- "STORE_MOTION delete insn in BB %d:\n ", bb->sindex);
+ "STORE_MOTION delete insn in BB %d:\n ", bb->index);
print_inline_rtx (gcse_file, del, 6);
fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
print_inline_rtx (gcse_file, insn, 6);
@@ -6942,8 +6939,7 @@ free_store_memory ()
static void
store_motion ()
{
- basic_block x;
- int y;
+ int x;
struct ls_expr * ptr;
int update_flow = 0;
@@ -6976,13 +6972,13 @@ store_motion ()
/* Now we want to insert the new stores which are going to be needed. */
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
{
- FOR_ALL_BB (x)
- if (TEST_BIT (pre_delete_map[x->sindex], ptr->index))
- delete_store (ptr, x);
+ for (x = 0; x < n_basic_blocks; x++)
+ if (TEST_BIT (pre_delete_map[x], ptr->index))
+ delete_store (ptr, BASIC_BLOCK (x));
- for (y = 0; y < NUM_EDGES (edge_list); y++)
- if (TEST_BIT (pre_insert_map[y], ptr->index))
- update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, y));
+ for (x = 0; x < NUM_EDGES (edge_list); x++)
+ if (TEST_BIT (pre_insert_map[x], ptr->index))
+ update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
}
if (update_flow)
diff --git a/gcc/global.c b/gcc/global.c
index c3edf1a..0d9618c 100644
--- a/gcc/global.c
+++ b/gcc/global.c
@@ -583,7 +583,7 @@ global_alloc (file)
#if 0 /* We need to eliminate regs even if there is no rtl code,
for the sake of debugging information. */
- if (num_basic_blocks > 0)
+ if (n_basic_blocks > 0)
#endif
{
build_insn_chain (get_insns ());
@@ -636,8 +636,7 @@ allocno_compare (v1p, v2p)
static void
global_conflicts ()
{
- int i;
- basic_block b;
+ int b, i;
rtx insn;
int *block_start_allocnos;
@@ -646,7 +645,7 @@ global_conflicts ()
block_start_allocnos = (int *) xmalloc (max_allocno * sizeof (int));
- FOR_ALL_BB (b)
+ for (b = 0; b < n_basic_blocks; b++)
{
memset ((char *) allocnos_live, 0, allocno_row_words * sizeof (INT_TYPE));
@@ -665,7 +664,7 @@ global_conflicts ()
are explicitly marked in basic_block_live_at_start. */
{
- regset old = b->global_live_at_start;
+ regset old = BASIC_BLOCK (b)->global_live_at_start;
int ax = 0;
REG_SET_TO_HARD_REG_SET (hard_regs_live, old);
@@ -714,7 +713,7 @@ global_conflicts ()
that is reached by an abnormal edge. */
edge e;
- for (e = b->pred; e ; e = e->pred_next)
+ for (e = BASIC_BLOCK (b)->pred; e ; e = e->pred_next)
if (e->flags & EDGE_ABNORMAL)
break;
if (e != NULL)
@@ -724,7 +723,7 @@ global_conflicts ()
#endif
}
- insn = b->head;
+ insn = BLOCK_HEAD (b);
/* Scan the code of this basic block, noting which allocnos
and hard regs are born or die. When one is born,
@@ -824,7 +823,7 @@ global_conflicts ()
}
}
- if (insn == b->end)
+ if (insn == BLOCK_END (b))
break;
insn = NEXT_INSN (insn);
}
@@ -1709,11 +1708,11 @@ void
mark_elimination (from, to)
int from, to;
{
- basic_block bb;
+ int i;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
- regset r = bb->global_live_at_start;
+ regset r = BASIC_BLOCK (i)->global_live_at_start;
if (REGNO_REG_SET_P (r, from))
{
CLEAR_REGNO_REG_SET (r, from);
@@ -1795,7 +1794,7 @@ build_insn_chain (first)
{
struct insn_chain **p = &reload_insn_chain;
struct insn_chain *prev = 0;
- basic_block b = ENTRY_BLOCK_PTR->next_bb;
+ int b = 0;
regset_head live_relevant_regs_head;
live_relevant_regs = INITIALIZE_REG_SET (live_relevant_regs_head);
@@ -1804,14 +1803,14 @@ build_insn_chain (first)
{
struct insn_chain *c;
- if (first == b->head)
+ if (first == BLOCK_HEAD (b))
{
int i;
CLEAR_REG_SET (live_relevant_regs);
EXECUTE_IF_SET_IN_BITMAP
- (b->global_live_at_start, 0, i,
+ (BASIC_BLOCK (b)->global_live_at_start, 0, i,
{
if (i < FIRST_PSEUDO_REGISTER
? ! TEST_HARD_REG_BIT (eliminable_regset, i)
@@ -1828,7 +1827,7 @@ build_insn_chain (first)
*p = c;
p = &c->next;
c->insn = first;
- c->block = b->sindex;
+ c->block = b;
if (INSN_P (first))
{
@@ -1866,8 +1865,8 @@ build_insn_chain (first)
}
}
- if (first == b->end)
- b = b->next_bb;
+ if (first == BLOCK_END (b))
+ b++;
/* Stop after we pass the end of the last basic block. Verify that
no real insns are after the end of the last basic block.
@@ -1875,7 +1874,7 @@ build_insn_chain (first)
We may want to reorganize the loop somewhat since this test should
always be the right exit test. Allow an ADDR_VEC or ADDR_DIF_VEC if
the previous real insn is a JUMP_INSN. */
- if (b == EXIT_BLOCK_PTR)
+ if (b == n_basic_blocks)
{
for (first = NEXT_INSN (first) ; first; first = NEXT_INSN (first))
if (INSN_P (first)
diff --git a/gcc/graph.c b/gcc/graph.c
index 3dd6f6f..8723047 100644
--- a/gcc/graph.c
+++ b/gcc/graph.c
@@ -258,6 +258,7 @@ print_rtl_graph_with_bb (base, suffix, rtx_first)
fprintf (fp, "(nil)\n");
else
{
+ int i;
enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
int max_uid = get_max_uid ();
int *start = (int *) xmalloc (max_uid * sizeof (int));
@@ -265,19 +266,19 @@ print_rtl_graph_with_bb (base, suffix, rtx_first)
enum bb_state *in_bb_p = (enum bb_state *)
xmalloc (max_uid * sizeof (enum bb_state));
basic_block bb;
- int j;
- for (j = 0; j < max_uid; ++j)
+ for (i = 0; i < max_uid; ++i)
{
- start[j] = end[j] = -1;
- in_bb_p[j] = NOT_IN_BB;
+ start[i] = end[i] = -1;
+ in_bb_p[i] = NOT_IN_BB;
}
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
rtx x;
- start[INSN_UID (bb->head)] = bb->sindex;
- end[INSN_UID (bb->end)] = bb->sindex;
+ bb = BASIC_BLOCK (i);
+ start[INSN_UID (bb->head)] = i;
+ end[INSN_UID (bb->end)] = i;
for (x = bb->head; x != NULL_RTX; x = NEXT_INSN (x))
{
in_bb_p[INSN_UID (x)]
@@ -309,12 +310,12 @@ print_rtl_graph_with_bb (base, suffix, rtx_first)
continue;
}
- if ((j = start[INSN_UID (tmp_rtx)]) >= 0)
+ if ((i = start[INSN_UID (tmp_rtx)]) >= 0)
{
/* We start a subgraph for each basic block. */
- start_bb (fp, j);
+ start_bb (fp, i);
- if (j == 0)
+ if (i == 0)
draw_edge (fp, 0, INSN_UID (tmp_rtx), 1, 0);
}
@@ -322,11 +323,11 @@ print_rtl_graph_with_bb (base, suffix, rtx_first)
node_data (fp, tmp_rtx);
next_insn = next_nonnote_insn (tmp_rtx);
- if ((j = end[INSN_UID (tmp_rtx)]) >= 0)
+ if ((i = end[INSN_UID (tmp_rtx)]) >= 0)
{
edge e;
- bb = BASIC_BLOCK (j);
+ bb = BASIC_BLOCK (i);
/* End of the basic block. */
end_bb (fp);
diff --git a/gcc/haifa-sched.c b/gcc/haifa-sched.c
index d520ef3..6b3e316 100644
--- a/gcc/haifa-sched.c
+++ b/gcc/haifa-sched.c
@@ -2303,8 +2303,7 @@ void
sched_init (dump_file)
FILE *dump_file;
{
- int luid;
- basic_block b;
+ int luid, b;
rtx insn;
int i;
@@ -2357,8 +2356,8 @@ sched_init (dump_file)
h_i_d[0].luid = 0;
luid = 1;
- FOR_ALL_BB (b)
- for (insn = b->head;; insn = NEXT_INSN (insn))
+ for (b = 0; b < n_basic_blocks; b++)
+ for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
{
INSN_LUID (insn) = luid;
@@ -2370,7 +2369,7 @@ sched_init (dump_file)
if (GET_CODE (insn) != NOTE)
++luid;
- if (insn == b->end)
+ if (insn == BLOCK_END (b))
break;
}
@@ -2384,7 +2383,7 @@ sched_init (dump_file)
{
rtx line;
- line_note_head = (rtx *) xcalloc (last_basic_block, sizeof (rtx));
+ line_note_head = (rtx *) xcalloc (n_basic_blocks, sizeof (rtx));
/* Save-line-note-head:
Determine the line-number at the start of each basic block.
@@ -2392,22 +2391,22 @@ sched_init (dump_file)
predecessor has been scheduled, it is impossible to accurately
determine the correct line number for the first insn of the block. */
- FOR_ALL_BB (b)
+ for (b = 0; b < n_basic_blocks; b++)
{
- for (line = b->head; line; line = PREV_INSN (line))
+ for (line = BLOCK_HEAD (b); line; line = PREV_INSN (line))
if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
{
- line_note_head[b->sindex] = line;
+ line_note_head[b] = line;
break;
}
/* Do a forward search as well, since we won't get to see the first
notes in a basic block. */
- for (line = b->head; line; line = NEXT_INSN (line))
+ for (line = BLOCK_HEAD (b); line; line = NEXT_INSN (line))
{
if (INSN_P (line))
break;
if (GET_CODE (line) == NOTE && NOTE_LINE_NUMBER (line) > 0)
- line_note_head[b->sindex] = line;
+ line_note_head[b] = line;
}
}
}
@@ -2421,22 +2420,22 @@ sched_init (dump_file)
/* ??? Add a NOTE after the last insn of the last basic block. It is not
known why this is done. */
- insn = EXIT_BLOCK_PTR->prev_bb->end;
+ insn = BLOCK_END (n_basic_blocks - 1);
if (NEXT_INSN (insn) == 0
|| (GET_CODE (insn) != NOTE
&& GET_CODE (insn) != CODE_LABEL
/* Don't emit a NOTE if it would end up before a BARRIER. */
&& GET_CODE (NEXT_INSN (insn)) != BARRIER))
{
- emit_note_after (NOTE_INSN_DELETED, EXIT_BLOCK_PTR->prev_bb->end);
+ emit_note_after (NOTE_INSN_DELETED, BLOCK_END (n_basic_blocks - 1));
/* Make insn to appear outside BB. */
- EXIT_BLOCK_PTR->prev_bb->end = PREV_INSN (EXIT_BLOCK_PTR->prev_bb->end);
+ BLOCK_END (n_basic_blocks - 1) = PREV_INSN (BLOCK_END (n_basic_blocks - 1));
}
/* Compute INSN_REG_WEIGHT for all blocks. We must do this before
removing death notes. */
- FOR_ALL_BB_REVERSE (b)
- find_insn_reg_weight (b->sindex);
+ for (b = n_basic_blocks - 1; b >= 0; b--)
+ find_insn_reg_weight (b);
}
/* Free global data used during insn scheduling. */
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
index 4b929ab..e8c2b5f 100644
--- a/gcc/ifcvt.c
+++ b/gcc/ifcvt.c
@@ -111,6 +111,14 @@ static int dead_or_predicable PARAMS ((basic_block, basic_block,
basic_block, basic_block, int));
static void noce_emit_move_insn PARAMS ((rtx, rtx));
+/* Abuse the basic_block AUX field to store the original block index,
+ as well as a flag indicating that the block should be rescaned for
+ life analysis. */
+
+#define SET_ORIG_INDEX(BB,I) ((BB)->aux = (void *)((size_t)(I)))
+#define ORIG_INDEX(BB) ((size_t)(BB)->aux)
+
+
/* Count the number of non-jump active insns in BB. */
static int
@@ -1965,7 +1973,7 @@ find_if_block (test_bb, then_edge, else_edge)
basic_block join_bb = NULL_BLOCK;
edge then_succ = then_bb->succ;
edge else_succ = else_bb->succ;
- basic_block next;
+ int next_index;
/* The THEN block of an IF-THEN combo must have exactly one predecessor. */
if (then_bb->pred->pred_next != NULL_EDGE)
@@ -2035,12 +2043,12 @@ find_if_block (test_bb, then_edge, else_edge)
if (else_bb)
fprintf (rtl_dump_file,
"\nIF-THEN-ELSE block found, start %d, then %d, else %d, join %d\n",
- test_bb->sindex, then_bb->sindex, else_bb->sindex,
- join_bb->sindex);
+ test_bb->index, then_bb->index, else_bb->index,
+ join_bb->index);
else
fprintf (rtl_dump_file,
"\nIF-THEN block found, start %d, then %d, join %d\n",
- test_bb->sindex, then_bb->sindex, join_bb->sindex);
+ test_bb->index, then_bb->index, join_bb->index);
}
/* Make sure IF, THEN, and ELSE, blocks are adjacent. Actually, we
@@ -2049,10 +2057,10 @@ find_if_block (test_bb, then_edge, else_edge)
/* ??? As an enhancement, move the ELSE block. Have to deal with
BLOCK notes, if by no other means than aborting the merge if they
exist. Sticky enough I don't want to think about it now. */
- next = then_bb;
- if (else_bb && (next = next->next_bb) != else_bb)
+ next_index = then_bb->index;
+ if (else_bb && ++next_index != else_bb->index)
return FALSE;
- if ((next = next->next_bb) != join_bb && join_bb != EXIT_BLOCK_PTR)
+ if (++next_index != join_bb->index && join_bb->index != EXIT_BLOCK)
{
if (else_bb)
join_bb = NULL;
@@ -2092,7 +2100,7 @@ find_cond_trap (test_bb, then_edge, else_edge)
if (rtl_dump_file)
{
fprintf (rtl_dump_file, "\nTRAP-IF block found, start %d, trap %d\n",
- test_bb->sindex, trap_bb->sindex);
+ test_bb->index, trap_bb->index);
}
/* If this is not a standard conditional jump, we can't parse it. */
@@ -2138,7 +2146,7 @@ find_cond_trap (test_bb, then_edge, else_edge)
/* If the non-trap block and the test are now adjacent, merge them.
Otherwise we must insert a direct branch. */
- if (test_bb->next_bb == other_bb)
+ if (test_bb->index + 1 == other_bb->index)
{
delete_insn (jump);
merge_if_block (test_bb, NULL, NULL, other_bb);
@@ -2292,7 +2300,7 @@ find_if_case_1 (test_bb, then_edge, else_edge)
if (rtl_dump_file)
fprintf (rtl_dump_file,
"\nIF-CASE-1 found, start %d, then %d\n",
- test_bb->sindex, then_bb->sindex);
+ test_bb->index, then_bb->index);
/* THEN is small. */
if (count_bb_insns (then_bb) > BRANCH_COST)
@@ -2313,6 +2321,8 @@ find_if_case_1 (test_bb, then_edge, else_edge)
new_bb = redirect_edge_and_branch_force (FALLTHRU_EDGE (test_bb), else_bb);
/* Make rest of code believe that the newly created block is the THEN_BB
block we are going to remove. */
+ if (new_bb)
+ new_bb->aux = then_bb->aux;
flow_delete_block (then_bb);
/* We've possibly created jump to next insn, cleanup_cfg will solve that
later. */
@@ -2348,16 +2358,16 @@ find_if_case_2 (test_bb, then_edge, else_edge)
return FALSE;
/* THEN is not EXIT. */
- if (then_bb == EXIT_BLOCK_PTR)
+ if (then_bb->index < 0)
return FALSE;
/* ELSE is predicted or SUCC(ELSE) postdominates THEN. */
note = find_reg_note (test_bb->end, REG_BR_PROB, NULL_RTX);
if (note && INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2)
;
- else if (else_succ->dest == EXIT_BLOCK_PTR
- || TEST_BIT (post_dominators[then_bb->sindex],
- else_succ->dest->sindex))
+ else if (else_succ->dest->index < 0
+ || TEST_BIT (post_dominators[ORIG_INDEX (then_bb)],
+ ORIG_INDEX (else_succ->dest)))
;
else
return FALSE;
@@ -2366,7 +2376,7 @@ find_if_case_2 (test_bb, then_edge, else_edge)
if (rtl_dump_file)
fprintf (rtl_dump_file,
"\nIF-CASE-2 found, start %d, else %d\n",
- test_bb->sindex, else_bb->sindex);
+ test_bb->index, else_bb->index);
/* ELSE is small. */
if (count_bb_insns (then_bb) > BRANCH_COST)
@@ -2675,7 +2685,7 @@ void
if_convert (x_life_data_ok)
int x_life_data_ok;
{
- basic_block bb;
+ int block_num;
num_possible_if_blocks = 0;
num_updated_if_blocks = 0;
@@ -2690,17 +2700,25 @@ if_convert (x_life_data_ok)
post_dominators = NULL;
if (HAVE_conditional_execution || life_data_ok)
{
- post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
+ post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
}
if (life_data_ok)
clear_bb_flags ();
+ /* Record initial block numbers. */
+ for (block_num = 0; block_num < n_basic_blocks; block_num++)
+ SET_ORIG_INDEX (BASIC_BLOCK (block_num), block_num);
+
/* Go through each of the basic blocks looking for things to convert. */
- FOR_ALL_BB (bb)
- while (find_if_header (bb))
- {
- }
+ for (block_num = 0; block_num < n_basic_blocks; )
+ {
+ basic_block bb = BASIC_BLOCK (block_num);
+ if (find_if_header (bb))
+ block_num = bb->index;
+ else
+ block_num++;
+ }
if (post_dominators)
sbitmap_vector_free (post_dominators);
diff --git a/gcc/lcm.c b/gcc/lcm.c
index 1db53c6..bc95aea 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -106,7 +106,7 @@ compute_antinout_edge (antloc, transp, antin, antout)
sbitmap *antin;
sbitmap *antout;
{
- basic_block bb;
+ int bb;
edge e;
basic_block *worklist, *qin, *qout, *qend;
unsigned int qlen;
@@ -115,23 +115,23 @@ compute_antinout_edge (antloc, transp, antin, antout)
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks);
+ = (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
/* We want a maximal solution, so make an optimistic initialization of
ANTIN. */
- sbitmap_vector_ones (antin, last_basic_block);
+ sbitmap_vector_ones (antin, n_basic_blocks);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of ANTIN above. */
- FOR_ALL_BB_REVERSE (bb)
+ for (bb = n_basic_blocks - 1; bb >= 0; bb--)
{
- *qin++ = bb;
- bb->aux = bb;
+ *qin++ = BASIC_BLOCK (bb);
+ BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
}
qin = worklist;
- qend = &worklist[num_basic_blocks];
- qlen = num_basic_blocks;
+ qend = &worklist[n_basic_blocks];
+ qlen = n_basic_blocks;
/* Mark blocks which are predecessors of the exit block so that we
can easily identify them below. */
@@ -142,31 +142,32 @@ compute_antinout_edge (antloc, transp, antin, antout)
while (qlen)
{
/* Take the first entry off the worklist. */
- basic_block bb = *qout++;
+ basic_block b = *qout++;
+ bb = b->index;
qlen--;
if (qout >= qend)
qout = worklist;
- if (bb->aux == EXIT_BLOCK_PTR)
+ if (b->aux == EXIT_BLOCK_PTR)
/* Do not clear the aux field for blocks which are predecessors of
the EXIT block. That way we never add then to the worklist
again. */
- sbitmap_zero (antout[bb->sindex]);
+ sbitmap_zero (antout[bb]);
else
{
/* Clear the aux field of this block so that it can be added to
the worklist again if necessary. */
- bb->aux = NULL;
- sbitmap_intersection_of_succs (antout[bb->sindex], antin, bb->sindex);
+ b->aux = NULL;
+ sbitmap_intersection_of_succs (antout[bb], antin, bb);
}
- if (sbitmap_a_or_b_and_c_cg (antin[bb->sindex], antloc[bb->sindex],
- transp[bb->sindex], antout[bb->sindex]))
+ if (sbitmap_a_or_b_and_c_cg (antin[bb], antloc[bb],
+ transp[bb], antout[bb]))
/* If the in state of this block changed, then we need
to add the predecessors of this block to the worklist
if they are not already on the worklist. */
- for (e = bb->pred; e; e = e->pred_next)
+ for (e = b->pred; e; e = e->pred_next)
if (!e->src->aux && e->src != ENTRY_BLOCK_PTR)
{
*qin++ = e->src;
@@ -204,22 +205,22 @@ compute_earliest (edge_list, n_exprs, antin, antout, avout, kill, earliest)
pred = INDEX_EDGE_PRED_BB (edge_list, x);
succ = INDEX_EDGE_SUCC_BB (edge_list, x);
if (pred == ENTRY_BLOCK_PTR)
- sbitmap_copy (earliest[x], antin[succ->sindex]);
+ sbitmap_copy (earliest[x], antin[succ->index]);
else
{
/* We refer to the EXIT_BLOCK index, instead of testing for
EXIT_BLOCK_PTR, so that EXIT_BLOCK_PTR's index can be
changed so as to pretend it's a regular block, so that
its antin can be taken into account. */
- if (succ->sindex == EXIT_BLOCK)
+ if (succ->index == EXIT_BLOCK)
sbitmap_zero (earliest[x]);
else
{
- sbitmap_difference (difference, antin[succ->sindex],
- avout[pred->sindex]);
- sbitmap_not (temp_bitmap, antout[pred->sindex]);
+ sbitmap_difference (difference, antin[succ->index],
+ avout[pred->index]);
+ sbitmap_not (temp_bitmap, antout[pred->index]);
sbitmap_a_and_b_or_c (earliest[x], difference,
- kill[pred->sindex], temp_bitmap);
+ kill[pred->index], temp_bitmap);
}
}
}
@@ -262,9 +263,9 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
struct edge_list *edge_list;
sbitmap *earliest, *antloc, *later, *laterin;
{
- int num_edges, i;
+ int bb, num_edges, i;
edge e;
- basic_block *worklist, *qin, *qout, *qend, bb;
+ basic_block *worklist, *qin, *qout, *qend;
unsigned int qlen;
num_edges = NUM_EDGES (edge_list);
@@ -273,7 +274,7 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * (num_basic_blocks + 1));
+ = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
/* Initialize a mapping from each edge to its index. */
for (i = 0; i < num_edges; i++)
@@ -300,39 +301,41 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
/* Add all the blocks to the worklist. This prevents an early exit from
the loop given our optimistic initialization of LATER above. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- *qin++ = bb;
- bb->aux = bb;
+ basic_block b = BASIC_BLOCK (bb);
+ *qin++ = b;
+ b->aux = b;
}
qin = worklist;
/* Note that we do not use the last allocated element for our queue,
as EXIT_BLOCK is never inserted into it. In fact the above allocation
- of num_basic_blocks + 1 elements is not encessary. */
- qend = &worklist[num_basic_blocks];
- qlen = num_basic_blocks;
+	 of n_basic_blocks + 1 elements is not necessary.  */
+ qend = &worklist[n_basic_blocks];
+ qlen = n_basic_blocks;
/* Iterate until the worklist is empty. */
while (qlen)
{
/* Take the first entry off the worklist. */
- bb = *qout++;
- bb->aux = NULL;
+ basic_block b = *qout++;
+ b->aux = NULL;
qlen--;
if (qout >= qend)
qout = worklist;
/* Compute the intersection of LATERIN for each incoming edge to B. */
- sbitmap_ones (laterin[bb->sindex]);
- for (e = bb->pred; e != NULL; e = e->pred_next)
- sbitmap_a_and_b (laterin[bb->sindex], laterin[bb->sindex], later[(size_t)e->aux]);
+ bb = b->index;
+ sbitmap_ones (laterin[bb]);
+ for (e = b->pred; e != NULL; e = e->pred_next)
+ sbitmap_a_and_b (laterin[bb], laterin[bb], later[(size_t)e->aux]);
/* Calculate LATER for all outgoing edges. */
- for (e = bb->succ; e != NULL; e = e->succ_next)
+ for (e = b->succ; e != NULL; e = e->succ_next)
if (sbitmap_union_of_diff_cg (later[(size_t) e->aux],
- earliest[(size_t) e->aux],
- laterin[e->src->sindex],
- antloc[e->src->sindex])
+ earliest[(size_t) e->aux],
+ laterin[e->src->index],
+ antloc[e->src->index])
/* If LATER for an outgoing edge was changed, then we need
to add the target of the outgoing edge to the worklist. */
&& e->dest != EXIT_BLOCK_PTR && e->dest->aux == 0)
@@ -348,10 +351,10 @@ compute_laterin (edge_list, earliest, antloc, later, laterin)
/* Computation of insertion and deletion points requires computing LATERIN
for the EXIT block. We allocated an extra entry in the LATERIN array
for just this purpose. */
- sbitmap_ones (laterin[last_basic_block]);
+ sbitmap_ones (laterin[n_basic_blocks]);
for (e = EXIT_BLOCK_PTR->pred; e != NULL; e = e->pred_next)
- sbitmap_a_and_b (laterin[last_basic_block],
- laterin[last_basic_block],
+ sbitmap_a_and_b (laterin[n_basic_blocks],
+ laterin[n_basic_blocks],
later[(size_t) e->aux]);
clear_aux_for_edges ();
@@ -367,19 +370,18 @@ compute_insert_delete (edge_list, antloc, later, laterin,
sbitmap *antloc, *later, *laterin, *insert, *delete;
{
int x;
- basic_block bb;
- FOR_ALL_BB (bb)
- sbitmap_difference (delete[bb->sindex], antloc[bb->sindex], laterin[bb->sindex]);
+ for (x = 0; x < n_basic_blocks; x++)
+ sbitmap_difference (delete[x], antloc[x], laterin[x]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
{
basic_block b = INDEX_EDGE_SUCC_BB (edge_list, x);
if (b == EXIT_BLOCK_PTR)
- sbitmap_difference (insert[x], later[x], laterin[last_basic_block]);
+ sbitmap_difference (insert[x], later[x], laterin[n_basic_blocks]);
else
- sbitmap_difference (insert[x], later[x], laterin[b->sindex]);
+ sbitmap_difference (insert[x], later[x], laterin[b->index]);
}
}
@@ -413,29 +415,29 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
fprintf (file, "Edge List:\n");
verify_edge_list (file, edge_list);
print_edge_list (file, edge_list);
- dump_sbitmap_vector (file, "transp", "", transp, last_basic_block);
- dump_sbitmap_vector (file, "antloc", "", antloc, last_basic_block);
- dump_sbitmap_vector (file, "avloc", "", avloc, last_basic_block);
- dump_sbitmap_vector (file, "kill", "", kill, last_basic_block);
+ dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
+ dump_sbitmap_vector (file, "antloc", "", antloc, n_basic_blocks);
+ dump_sbitmap_vector (file, "avloc", "", avloc, n_basic_blocks);
+ dump_sbitmap_vector (file, "kill", "", kill, n_basic_blocks);
}
#endif
/* Compute global availability. */
- avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
- avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
compute_available (avloc, kill, avout, avin);
sbitmap_vector_free (avin);
/* Compute global anticipatability. */
- antin = sbitmap_vector_alloc (last_basic_block, n_exprs);
- antout = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ antin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ antout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
compute_antinout_edge (antloc, transp, antin, antout);
#ifdef LCM_DEBUG_INFO
if (file)
{
- dump_sbitmap_vector (file, "antin", "", antin, last_basic_block);
- dump_sbitmap_vector (file, "antout", "", antout, last_basic_block);
+ dump_sbitmap_vector (file, "antin", "", antin, n_basic_blocks);
+ dump_sbitmap_vector (file, "antout", "", antout, n_basic_blocks);
}
#endif
@@ -455,13 +457,13 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
later = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the exit block in the laterin vector. */
- laterin = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
+ laterin = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
compute_laterin (edge_list, earliest, antloc, later, laterin);
#ifdef LCM_DEBUG_INFO
if (file)
{
- dump_sbitmap_vector (file, "laterin", "", laterin, last_basic_block + 1);
+ dump_sbitmap_vector (file, "laterin", "", laterin, n_basic_blocks + 1);
dump_sbitmap_vector (file, "later", "", later, num_edges);
}
#endif
@@ -469,7 +471,7 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
sbitmap_vector_free (earliest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
- *delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
compute_insert_delete (edge_list, antloc, later, laterin, *insert, *delete);
sbitmap_vector_free (laterin);
@@ -480,7 +482,7 @@ pre_edge_lcm (file, n_exprs, transp, avloc, antloc, kill, insert, delete)
{
dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
- last_basic_block);
+ n_basic_blocks);
}
#endif
@@ -494,30 +496,31 @@ void
compute_available (avloc, kill, avout, avin)
sbitmap *avloc, *kill, *avout, *avin;
{
+ int bb;
edge e;
- basic_block *worklist, *qin, *qout, *qend, bb;
+ basic_block *worklist, *qin, *qout, *qend;
unsigned int qlen;
/* Allocate a worklist array/queue. Entries are only added to the
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
qin = qout = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * num_basic_blocks);
+ = (basic_block *) xmalloc (sizeof (basic_block) * n_basic_blocks);
/* We want a maximal solution. */
- sbitmap_vector_ones (avout, last_basic_block);
+ sbitmap_vector_ones (avout, n_basic_blocks);
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of AVOUT above. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- *qin++ = bb;
- bb->aux = bb;
+ *qin++ = BASIC_BLOCK (bb);
+ BASIC_BLOCK (bb)->aux = BASIC_BLOCK (bb);
}
qin = worklist;
- qend = &worklist[num_basic_blocks];
- qlen = num_basic_blocks;
+ qend = &worklist[n_basic_blocks];
+ qlen = n_basic_blocks;
/* Mark blocks which are successors of the entry block so that we
can easily identify them below. */
@@ -528,7 +531,8 @@ compute_available (avloc, kill, avout, avin)
while (qlen)
{
/* Take the first entry off the worklist. */
- basic_block bb = *qout++;
+ basic_block b = *qout++;
+ bb = b->index;
qlen--;
if (qout >= qend)
@@ -537,24 +541,23 @@ compute_available (avloc, kill, avout, avin)
/* If one of the predecessor blocks is the ENTRY block, then the
intersection of avouts is the null set. We can identify such blocks
by the special value in the AUX field in the block structure. */
- if (bb->aux == ENTRY_BLOCK_PTR)
+ if (b->aux == ENTRY_BLOCK_PTR)
/* Do not clear the aux field for blocks which are successors of the
ENTRY block. That way we never add then to the worklist again. */
- sbitmap_zero (avin[bb->sindex]);
+ sbitmap_zero (avin[bb]);
else
{
/* Clear the aux field of this block so that it can be added to
the worklist again if necessary. */
- bb->aux = NULL;
- sbitmap_intersection_of_preds (avin[bb->sindex], avout, bb->sindex);
+ b->aux = NULL;
+ sbitmap_intersection_of_preds (avin[bb], avout, bb);
}
- if (sbitmap_union_of_diff_cg (avout[bb->sindex], avloc[bb->sindex],
- avin[bb->sindex], kill[bb->sindex]))
+ if (sbitmap_union_of_diff_cg (avout[bb], avloc[bb], avin[bb], kill[bb]))
/* If the out state of this block changed, then we need
to add the successors of this block to the worklist
if they are not already on the worklist. */
- for (e = bb->succ; e; e = e->succ_next)
+ for (e = b->succ; e; e = e->succ_next)
if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR)
{
*qin++ = e->dest;
@@ -594,18 +597,18 @@ compute_farthest (edge_list, n_exprs, st_avout, st_avin, st_antin,
pred = INDEX_EDGE_PRED_BB (edge_list, x);
succ = INDEX_EDGE_SUCC_BB (edge_list, x);
if (succ == EXIT_BLOCK_PTR)
- sbitmap_copy (farthest[x], st_avout[pred->sindex]);
+ sbitmap_copy (farthest[x], st_avout[pred->index]);
else
{
if (pred == ENTRY_BLOCK_PTR)
sbitmap_zero (farthest[x]);
else
{
- sbitmap_difference (difference, st_avout[pred->sindex],
- st_antin[succ->sindex]);
- sbitmap_not (temp_bitmap, st_avin[succ->sindex]);
+ sbitmap_difference (difference, st_avout[pred->index],
+ st_antin[succ->index]);
+ sbitmap_not (temp_bitmap, st_avin[succ->index]);
sbitmap_a_and_b_or_c (farthest[x], difference,
- kill[succ->sindex], temp_bitmap);
+ kill[succ->index], temp_bitmap);
}
}
}
@@ -624,9 +627,9 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
struct edge_list *edge_list;
sbitmap *farthest, *st_avloc, *nearer, *nearerout;
{
- int num_edges, i;
+ int bb, num_edges, i;
edge e;
- basic_block *worklist, *tos, bb;
+ basic_block *worklist, *tos;
num_edges = NUM_EDGES (edge_list);
@@ -634,7 +637,7 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
list if they were not already on the list. So the size is
bounded by the number of basic blocks. */
tos = worklist
- = (basic_block *) xmalloc (sizeof (basic_block) * (num_basic_blocks + 1));
+ = (basic_block *) xmalloc (sizeof (basic_block) * (n_basic_blocks + 1));
/* Initialize NEARER for each edge and build a mapping from an edge to
its index. */
@@ -653,31 +656,33 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
/* Add all the blocks to the worklist. This prevents an early exit
from the loop given our optimistic initialization of NEARER. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- *tos++ = bb;
- bb->aux = bb;
+ basic_block b = BASIC_BLOCK (bb);
+ *tos++ = b;
+ b->aux = b;
}
/* Iterate until the worklist is empty. */
while (tos != worklist)
{
/* Take the first entry off the worklist. */
- bb = *--tos;
- bb->aux = NULL;
+ basic_block b = *--tos;
+ b->aux = NULL;
/* Compute the intersection of NEARER for each outgoing edge from B. */
- sbitmap_ones (nearerout[bb->sindex]);
- for (e = bb->succ; e != NULL; e = e->succ_next)
- sbitmap_a_and_b (nearerout[bb->sindex], nearerout[bb->sindex],
+ bb = b->index;
+ sbitmap_ones (nearerout[bb]);
+ for (e = b->succ; e != NULL; e = e->succ_next)
+ sbitmap_a_and_b (nearerout[bb], nearerout[bb],
nearer[(size_t) e->aux]);
/* Calculate NEARER for all incoming edges. */
- for (e = bb->pred; e != NULL; e = e->pred_next)
+ for (e = b->pred; e != NULL; e = e->pred_next)
if (sbitmap_union_of_diff_cg (nearer[(size_t) e->aux],
- farthest[(size_t) e->aux],
- nearerout[e->dest->sindex],
- st_avloc[e->dest->sindex])
+ farthest[(size_t) e->aux],
+ nearerout[e->dest->index],
+ st_avloc[e->dest->index])
/* If NEARER for an incoming edge was changed, then we need
to add the source of the incoming edge to the worklist. */
&& e->src != ENTRY_BLOCK_PTR && e->src->aux == 0)
@@ -690,10 +695,10 @@ compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout)
/* Computation of insertion and deletion points requires computing NEAREROUT
for the ENTRY block. We allocated an extra entry in the NEAREROUT array
for just this purpose. */
- sbitmap_ones (nearerout[last_basic_block]);
+ sbitmap_ones (nearerout[n_basic_blocks]);
for (e = ENTRY_BLOCK_PTR->succ; e != NULL; e = e->succ_next)
- sbitmap_a_and_b (nearerout[last_basic_block],
- nearerout[last_basic_block],
+ sbitmap_a_and_b (nearerout[n_basic_blocks],
+ nearerout[n_basic_blocks],
nearer[(size_t) e->aux]);
clear_aux_for_edges ();
@@ -709,19 +714,17 @@ compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
sbitmap *st_avloc, *nearer, *nearerout, *insert, *delete;
{
int x;
- basic_block bb;
- FOR_ALL_BB (bb)
- sbitmap_difference (delete[bb->sindex], st_avloc[bb->sindex],
- nearerout[bb->sindex]);
+ for (x = 0; x < n_basic_blocks; x++)
+ sbitmap_difference (delete[x], st_avloc[x], nearerout[x]);
for (x = 0; x < NUM_EDGES (edge_list); x++)
{
basic_block b = INDEX_EDGE_PRED_BB (edge_list, x);
if (b == ENTRY_BLOCK_PTR)
- sbitmap_difference (insert[x], nearer[x], nearerout[last_basic_block]);
+ sbitmap_difference (insert[x], nearer[x], nearerout[n_basic_blocks]);
else
- sbitmap_difference (insert[x], nearer[x], nearerout[b->sindex]);
+ sbitmap_difference (insert[x], nearer[x], nearerout[b->index]);
}
}
@@ -751,15 +754,15 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
edge_list = create_edge_list ();
num_edges = NUM_EDGES (edge_list);
- st_antin = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs);
- st_antout = (sbitmap *) sbitmap_vector_alloc (last_basic_block, n_exprs);
- sbitmap_vector_zero (st_antin, last_basic_block);
- sbitmap_vector_zero (st_antout, last_basic_block);
+ st_antin = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ st_antout = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ sbitmap_vector_zero (st_antin, n_basic_blocks);
+ sbitmap_vector_zero (st_antout, n_basic_blocks);
compute_antinout_edge (st_antloc, transp, st_antin, st_antout);
/* Compute global anticipatability. */
- st_avout = sbitmap_vector_alloc (last_basic_block, n_exprs);
- st_avin = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ st_avout = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
+ st_avin = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
compute_available (st_avloc, kill, st_avout, st_avin);
#ifdef LCM_DEBUG_INFO
@@ -768,20 +771,20 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
fprintf (file, "Edge List:\n");
verify_edge_list (file, edge_list);
print_edge_list (file, edge_list);
- dump_sbitmap_vector (file, "transp", "", transp, last_basic_block);
- dump_sbitmap_vector (file, "st_avloc", "", st_avloc, last_basic_block);
- dump_sbitmap_vector (file, "st_antloc", "", st_antloc, last_basic_block);
- dump_sbitmap_vector (file, "st_antin", "", st_antin, last_basic_block);
- dump_sbitmap_vector (file, "st_antout", "", st_antout, last_basic_block);
- dump_sbitmap_vector (file, "st_kill", "", kill, last_basic_block);
+ dump_sbitmap_vector (file, "transp", "", transp, n_basic_blocks);
+ dump_sbitmap_vector (file, "st_avloc", "", st_avloc, n_basic_blocks);
+ dump_sbitmap_vector (file, "st_antloc", "", st_antloc, n_basic_blocks);
+ dump_sbitmap_vector (file, "st_antin", "", st_antin, n_basic_blocks);
+ dump_sbitmap_vector (file, "st_antout", "", st_antout, n_basic_blocks);
+ dump_sbitmap_vector (file, "st_kill", "", kill, n_basic_blocks);
}
#endif
#ifdef LCM_DEBUG_INFO
if (file)
{
- dump_sbitmap_vector (file, "st_avout", "", st_avout, last_basic_block);
- dump_sbitmap_vector (file, "st_avin", "", st_avin, last_basic_block);
+ dump_sbitmap_vector (file, "st_avout", "", st_avout, n_basic_blocks);
+ dump_sbitmap_vector (file, "st_avin", "", st_avin, n_basic_blocks);
}
#endif
@@ -804,14 +807,14 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
nearer = sbitmap_vector_alloc (num_edges, n_exprs);
/* Allocate an extra element for the entry block. */
- nearerout = sbitmap_vector_alloc (last_basic_block + 1, n_exprs);
+ nearerout = sbitmap_vector_alloc (n_basic_blocks + 1, n_exprs);
compute_nearerout (edge_list, farthest, st_avloc, nearer, nearerout);
#ifdef LCM_DEBUG_INFO
if (file)
{
dump_sbitmap_vector (file, "nearerout", "", nearerout,
- last_basic_block + 1);
+ n_basic_blocks + 1);
dump_sbitmap_vector (file, "nearer", "", nearer, num_edges);
}
#endif
@@ -819,7 +822,7 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
sbitmap_vector_free (farthest);
*insert = sbitmap_vector_alloc (num_edges, n_exprs);
- *delete = sbitmap_vector_alloc (last_basic_block, n_exprs);
+ *delete = sbitmap_vector_alloc (n_basic_blocks, n_exprs);
compute_rev_insert_delete (edge_list, st_avloc, nearer, nearerout,
*insert, *delete);
@@ -831,7 +834,7 @@ pre_edge_rev_lcm (file, n_exprs, transp, st_avloc, st_antloc, kill,
{
dump_sbitmap_vector (file, "pre_insert_map", "", *insert, num_edges);
dump_sbitmap_vector (file, "pre_delete_map", "", *delete,
- last_basic_block);
+ n_basic_blocks);
}
#endif
return edge_list;
@@ -957,10 +960,10 @@ make_preds_opaque (b, j)
{
basic_block pb = e->src;
- if (e->aux || ! TEST_BIT (transp[pb->sindex], j))
+ if (e->aux || ! TEST_BIT (transp[pb->index], j))
continue;
- RESET_BIT (transp[pb->sindex], j);
+ RESET_BIT (transp[pb->index], j);
make_preds_opaque (pb, j);
}
}
@@ -1016,8 +1019,7 @@ optimize_mode_switching (file)
FILE *file;
{
rtx insn;
- int e;
- basic_block bb;
+ int bb, e;
int need_commit = 0;
sbitmap *kill;
struct edge_list *edge_list;
@@ -1032,8 +1034,8 @@ optimize_mode_switching (file)
clear_bb_flags ();
#ifdef NORMAL_MODE
- /* Increment last_basic_block before allocating bb_info. */
- last_basic_block++;
+ /* Increment n_basic_blocks before allocating bb_info. */
+ n_basic_blocks++;
#endif
for (e = N_ENTITIES - 1, n_entities = 0; e >= 0; e--)
@@ -1041,7 +1043,7 @@ optimize_mode_switching (file)
{
/* Create the list of segments within each basic block. */
bb_info[n_entities]
- = (struct bb_info *) xcalloc (last_basic_block, sizeof **bb_info);
+ = (struct bb_info *) xcalloc (n_basic_blocks, sizeof **bb_info);
entity_map[n_entities++] = e;
if (num_modes[e] > max_num_modes)
max_num_modes = num_modes[e];
@@ -1049,7 +1051,7 @@ optimize_mode_switching (file)
#ifdef NORMAL_MODE
/* Decrement it back in case we return below. */
- last_basic_block--;
+ n_basic_blocks--;
#endif
if (! n_entities)
@@ -1061,20 +1063,20 @@ optimize_mode_switching (file)
EXIT_BLOCK isn't optimized away. We do this by incrementing the
basic block count, growing the VARRAY of basic_block_info and
appending the EXIT_BLOCK_PTR to it. */
- last_basic_block++;
- if (VARRAY_SIZE (basic_block_info) < last_basic_block)
- VARRAY_GROW (basic_block_info, last_basic_block);
- BASIC_BLOCK (last_basic_block - 1) = EXIT_BLOCK_PTR;
- EXIT_BLOCK_PTR->sindex = last_basic_blocks;
+ n_basic_blocks++;
+ if (VARRAY_SIZE (basic_block_info) < n_basic_blocks)
+ VARRAY_GROW (basic_block_info, n_basic_blocks);
+ BASIC_BLOCK (n_basic_blocks - 1) = EXIT_BLOCK_PTR;
+ EXIT_BLOCK_PTR->index = n_basic_blocks - 1;
#endif
/* Create the bitmap vectors. */
- antic = sbitmap_vector_alloc (last_basic_block, n_entities);
- transp = sbitmap_vector_alloc (last_basic_block, n_entities);
- comp = sbitmap_vector_alloc (last_basic_block, n_entities);
+ antic = sbitmap_vector_alloc (n_basic_blocks, n_entities);
+ transp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
+ comp = sbitmap_vector_alloc (n_basic_blocks, n_entities);
- sbitmap_vector_ones (transp, last_basic_block);
+ sbitmap_vector_ones (transp, n_basic_blocks);
for (j = n_entities - 1; j >= 0; j--)
{
@@ -1085,16 +1087,16 @@ optimize_mode_switching (file)
/* Determine what the first use (if any) need for a mode of entity E is.
This will be the mode that is anticipatable for this block.
Also compute the initial transparency settings. */
- FOR_ALL_BB (bb)
+ for (bb = 0 ; bb < n_basic_blocks; bb++)
{
struct seginfo *ptr;
int last_mode = no_mode;
HARD_REG_SET live_now;
REG_SET_TO_HARD_REG_SET (live_now,
- bb->global_live_at_start);
- for (insn = bb->head;
- insn != NULL && insn != NEXT_INSN (bb->end);
+ BASIC_BLOCK (bb)->global_live_at_start);
+ for (insn = BLOCK_HEAD (bb);
+ insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
insn = NEXT_INSN (insn))
{
if (INSN_P (insn))
@@ -1105,9 +1107,9 @@ optimize_mode_switching (file)
if (mode != no_mode && mode != last_mode)
{
last_mode = mode;
- ptr = new_seginfo (mode, insn, bb->sindex, live_now);
- add_seginfo (info + bb->sindex, ptr);
- RESET_BIT (transp[bb->sindex], j);
+ ptr = new_seginfo (mode, insn, bb, live_now);
+ add_seginfo (info + bb, ptr);
+ RESET_BIT (transp[bb], j);
}
/* Update LIVE_NOW. */
@@ -1122,12 +1124,12 @@ optimize_mode_switching (file)
}
}
- info[bb->sindex].computing = last_mode;
+ info[bb].computing = last_mode;
/* Check for blocks without ANY mode requirements. */
if (last_mode == no_mode)
{
- ptr = new_seginfo (no_mode, insn, bb->sindex, live_now);
- add_seginfo (info + bb->sindex, ptr);
+ ptr = new_seginfo (no_mode, insn, bb, live_now);
+ add_seginfo (info + bb, ptr);
}
}
#ifdef NORMAL_MODE
@@ -1140,65 +1142,65 @@ optimize_mode_switching (file)
for (eg = ENTRY_BLOCK_PTR->succ; eg; eg = eg->succ_next)
{
- bb = eg->dest;
+ bb = eg->dest->index;
/* By always making this nontransparent, we save
an extra check in make_preds_opaque. We also
need this to avoid confusing pre_edge_lcm when
antic is cleared but transp and comp are set. */
- RESET_BIT (transp[bb->sindex], j);
+ RESET_BIT (transp[bb], j);
/* If the block already has MODE, pretend it
has none (because we don't need to set it),
but retain whatever mode it computes. */
- if (info[bb->sindex].seginfo->mode == mode)
- info[bb->sindex].seginfo->mode = no_mode;
+ if (info[bb].seginfo->mode == mode)
+ info[bb].seginfo->mode = no_mode;
/* Insert a fake computing definition of MODE into entry
blocks which compute no mode. This represents the mode on
entry. */
- else if (info[bb->sindex].computing == no_mode)
+ else if (info[bb].computing == no_mode)
{
- info[bb->sindex].computing = mode;
- info[bb->sindex].seginfo->mode = no_mode;
+ info[bb].computing = mode;
+ info[bb].seginfo->mode = no_mode;
}
}
- bb = EXIT_BLOCK_PTR;
- info[bb->sindex].seginfo->mode = mode;
+ bb = n_basic_blocks - 1;
+ info[bb].seginfo->mode = mode;
}
}
#endif /* NORMAL_MODE */
}
- kill = sbitmap_vector_alloc (last_basic_block, n_entities);
+ kill = sbitmap_vector_alloc (n_basic_blocks, n_entities);
for (i = 0; i < max_num_modes; i++)
{
int current_mode[N_ENTITIES];
/* Set the anticipatable and computing arrays. */
- sbitmap_vector_zero (antic, last_basic_block);
- sbitmap_vector_zero (comp, last_basic_block);
+ sbitmap_vector_zero (antic, n_basic_blocks);
+ sbitmap_vector_zero (comp, n_basic_blocks);
for (j = n_entities - 1; j >= 0; j--)
{
int m = current_mode[j] = MODE_PRIORITY_TO_MODE (entity_map[j], i);
struct bb_info *info = bb_info[j];
- FOR_ALL_BB (bb)
+ for (bb = 0 ; bb < n_basic_blocks; bb++)
{
- if (info[bb->sindex].seginfo->mode == m)
- SET_BIT (antic[bb->sindex], j);
+ if (info[bb].seginfo->mode == m)
+ SET_BIT (antic[bb], j);
- if (info[bb->sindex].computing == m)
- SET_BIT (comp[bb->sindex], j);
+ if (info[bb].computing == m)
+ SET_BIT (comp[bb], j);
}
}
/* Calculate the optimal locations for the
placement mode switches to modes with priority I. */
- FOR_ALL_BB_REVERSE (bb)
- sbitmap_not (kill[bb->sindex], transp[bb->sindex]);
+ for (bb = n_basic_blocks - 1; bb >= 0; bb--)
+ sbitmap_not (kill[bb], transp[bb]);
edge_list = pre_edge_lcm (file, 1, transp, comp, antic,
kill, &insert, &delete);
@@ -1267,8 +1269,8 @@ optimize_mode_switching (file)
emit_insn_after (mode_set, src_bb->end);
else
abort ();
- bb_info[j][src_bb->sindex].computing = mode;
- RESET_BIT (transp[src_bb->sindex], j);
+ bb_info[j][src_bb->index].computing = mode;
+ RESET_BIT (transp[src_bb->index], j);
}
else
{
@@ -1277,12 +1279,12 @@ optimize_mode_switching (file)
}
}
- FOR_ALL_BB_REVERSE (bb)
- if (TEST_BIT (delete[bb->sindex], j))
+ for (bb = n_basic_blocks - 1; bb >= 0; bb--)
+ if (TEST_BIT (delete[bb], j))
{
- make_preds_opaque (bb, j);
+ make_preds_opaque (BASIC_BLOCK (bb), j);
/* Cancel the 'deleted' mode set. */
- bb_info[j][bb->sindex].seginfo->mode = no_mode;
+ bb_info[j][bb].seginfo->mode = no_mode;
}
}
@@ -1292,9 +1294,9 @@ optimize_mode_switching (file)
#ifdef NORMAL_MODE
/* Restore the special status of EXIT_BLOCK. */
- last_basic_block--;
+ n_basic_blocks--;
VARRAY_POP (basic_block_info);
- EXIT_BLOCK_PTR->sindex = EXIT_BLOCK;
+ EXIT_BLOCK_PTR->index = EXIT_BLOCK;
#endif
/* Now output the remaining mode sets in all the segments. */
@@ -1303,16 +1305,16 @@ optimize_mode_switching (file)
int no_mode = num_modes[entity_map[j]];
#ifdef NORMAL_MODE
- if (bb_info[j][last_basic_block].seginfo->mode != no_mode)
+ if (bb_info[j][n_basic_blocks].seginfo->mode != no_mode)
{
edge eg;
- struct seginfo *ptr = bb_info[j][last_basic_block].seginfo;
+ struct seginfo *ptr = bb_info[j][n_basic_blocks].seginfo;
for (eg = EXIT_BLOCK_PTR->pred; eg; eg = eg->pred_next)
{
rtx mode_set;
- if (bb_info[j][eg->src->sindex].computing == ptr->mode)
+ if (bb_info[j][eg->src->index].computing == ptr->mode)
continue;
start_sequence ();
@@ -1347,10 +1349,10 @@ optimize_mode_switching (file)
}
#endif
- FOR_ALL_BB_REVERSE (bb)
+ for (bb = n_basic_blocks - 1; bb >= 0; bb--)
{
struct seginfo *ptr, *next;
- for (ptr = bb_info[j][bb->sindex].seginfo; ptr; ptr = next)
+ for (ptr = bb_info[j][bb].seginfo; ptr; ptr = next)
{
next = ptr->next;
if (ptr->mode != no_mode)
diff --git a/gcc/local-alloc.c b/gcc/local-alloc.c
index 860856c..cd216f9 100644
--- a/gcc/local-alloc.c
+++ b/gcc/local-alloc.c
@@ -336,9 +336,8 @@ alloc_qty (regno, mode, size, birth)
int
local_alloc ()
{
- int i;
+ int b, i;
int max_qty;
- basic_block b;
/* We need to keep track of whether or not we recorded a LABEL_REF so
that we know if the jump optimizer needs to be rerun. */
@@ -395,7 +394,7 @@ local_alloc ()
/* Allocate each block's local registers, block by block. */
- FOR_ALL_BB (b)
+ for (b = 0; b < n_basic_blocks; b++)
{
/* NEXT_QTY indicates which elements of the `qty_...'
vectors might need to be initialized because they were used
@@ -427,7 +426,7 @@ local_alloc ()
next_qty = 0;
- block_alloc (b->sindex);
+ block_alloc (b);
}
free (qty);
@@ -816,7 +815,7 @@ static void
update_equiv_regs ()
{
rtx insn;
- basic_block bb;
+ int block;
int loop_depth;
regset_head cleared_regs;
int clear_regnos = 0;
@@ -829,8 +828,9 @@ update_equiv_regs ()
/* Scan the insns and find which registers have equivalences. Do this
in a separate scan of the insns because (due to -fcse-follow-jumps)
a register can be set below its use. */
- FOR_ALL_BB (bb)
+ for (block = 0; block < n_basic_blocks; block++)
{
+ basic_block bb = BASIC_BLOCK (block);
loop_depth = bb->loop_depth;
for (insn = bb->head; insn != NEXT_INSN (bb->end); insn = NEXT_INSN (insn))
@@ -1044,8 +1044,10 @@ update_equiv_regs ()
within the same loop (or in an inner loop), then move the register
initialization just before the use, so that they are in the same
basic block. */
- FOR_ALL_BB_REVERSE (bb)
+ for (block = n_basic_blocks - 1; block >= 0; block--)
{
+ basic_block bb = BASIC_BLOCK (block);
+
loop_depth = bb->loop_depth;
for (insn = bb->end; insn != PREV_INSN (bb->head); insn = PREV_INSN (insn))
{
@@ -1137,12 +1139,12 @@ update_equiv_regs ()
XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
- REG_BASIC_BLOCK (regno) = bb->sindex;
+ REG_BASIC_BLOCK (regno) = block >= 0 ? block : 0;
REG_N_CALLS_CROSSED (regno) = 0;
REG_LIVE_LENGTH (regno) = 2;
- if (insn == bb->head)
- bb->head = PREV_INSN (insn);
+ if (block >= 0 && insn == BLOCK_HEAD (block))
+ BLOCK_HEAD (block) = PREV_INSN (insn);
/* Remember to clear REGNO from all basic block's live
info. */
@@ -1157,22 +1159,24 @@ update_equiv_regs ()
/* Clear all dead REGNOs from all basic block's live info. */
if (clear_regnos)
{
- int j;
+ int j, l;
if (clear_regnos > 8)
{
- FOR_ALL_BB (bb)
+ for (l = 0; l < n_basic_blocks; l++)
{
- AND_COMPL_REG_SET (bb->global_live_at_start, &cleared_regs);
- AND_COMPL_REG_SET (bb->global_live_at_end, &cleared_regs);
+ AND_COMPL_REG_SET (BASIC_BLOCK (l)->global_live_at_start,
+ &cleared_regs);
+ AND_COMPL_REG_SET (BASIC_BLOCK (l)->global_live_at_end,
+ &cleared_regs);
}
}
else
EXECUTE_IF_SET_IN_REG_SET (&cleared_regs, 0, j,
{
- FOR_ALL_BB (bb)
+ for (l = 0; l < n_basic_blocks; l++)
{
- CLEAR_REGNO_REG_SET (bb->global_live_at_start, j);
- CLEAR_REGNO_REG_SET (bb->global_live_at_end, j);
+ CLEAR_REGNO_REG_SET (BASIC_BLOCK (l)->global_live_at_start, j);
+ CLEAR_REGNO_REG_SET (BASIC_BLOCK (l)->global_live_at_end, j);
}
});
}
diff --git a/gcc/loop.c b/gcc/loop.c
index 7a5b1a4..7d3c5b2 100644
--- a/gcc/loop.c
+++ b/gcc/loop.c
@@ -10785,7 +10785,7 @@ loop_dump_aux (loop, file, verbose)
/* This can happen when a marked loop appears as two nested loops,
say from while (a || b) {}. The inner loop won't match
the loop markers but the outer one will. */
- if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->sindex)
+ if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
}
}
diff --git a/gcc/predict.c b/gcc/predict.c
index 5eda98c..f457817 100644
--- a/gcc/predict.c
+++ b/gcc/predict.c
@@ -319,7 +319,7 @@ combine_predictions_for_insn (insn, bb)
if (rtl_dump_file)
fprintf (rtl_dump_file, "Predictions for insn %i bb %i\n", INSN_UID (insn),
- bb->sindex);
+ bb->index);
/* We implement "first match" heuristics and use probability guessed
by predictor with smallest index. In the future we will use better
@@ -409,11 +409,10 @@ estimate_probability (loops_info)
struct loops *loops_info;
{
sbitmap *dominators, *post_dominators;
- basic_block bb;
int i;
- dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
- post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
+ dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
calculate_dominance_info (NULL, dominators, CDI_DOMINATORS);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
@@ -421,14 +420,15 @@ estimate_probability (loops_info)
natural loop. */
for (i = 0; i < loops_info->num; i++)
{
+ int j;
int exits;
struct loop *loop = &loops_info->array[i];
flow_loop_scan (loops_info, loop, LOOP_EXIT_EDGES);
exits = loop->num_exits;
- FOR_BB_BETWEEN (bb, loop->first, loop->last->next_bb, next_bb)
- if (TEST_BIT (loop->nodes, bb->sindex))
+ for (j = loop->first->index; j <= loop->last->index; ++j)
+ if (TEST_BIT (loop->nodes, j))
{
int header_found = 0;
edge e;
@@ -437,12 +437,12 @@ estimate_probability (loops_info)
statements construct loops via "non-loop" constructs
in the source language and are better to be handled
separately. */
- if (predicted_by_p (bb, PRED_CONTINUE))
+ if (predicted_by_p (BASIC_BLOCK (j), PRED_CONTINUE))
continue;
/* Loop branch heuristics - predict an edge back to a
loop's head as taken. */
- for (e = bb->succ; e; e = e->succ_next)
+ for (e = BASIC_BLOCK(j)->succ; e; e = e->succ_next)
if (e->dest == loop->header
&& e->src == loop->latch)
{
@@ -453,9 +453,9 @@ estimate_probability (loops_info)
/* Loop exit heuristics - predict an edge exiting the loop if the
conditinal has no loop header successors as not taken. */
if (!header_found)
- for (e = bb->succ; e; e = e->succ_next)
- if (e->dest->sindex < 0
- || !TEST_BIT (loop->nodes, e->dest->sindex))
+ for (e = BASIC_BLOCK(j)->succ; e; e = e->succ_next)
+ if (e->dest->index < 0
+ || !TEST_BIT (loop->nodes, e->dest->index))
predict_edge
(e, PRED_LOOP_EXIT,
(REG_BR_PROB_BASE
@@ -465,8 +465,9 @@ estimate_probability (loops_info)
}
/* Attempt to predict conditional jumps using a number of heuristics. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx last_insn = bb->end;
rtx cond, earliest;
edge e;
@@ -491,8 +492,8 @@ estimate_probability (loops_info)
/* Look for block we are guarding (ie we dominate it,
but it doesn't postdominate us). */
if (e->dest != EXIT_BLOCK_PTR && e->dest != bb
- && TEST_BIT (dominators[e->dest->sindex], e->src->sindex)
- && !TEST_BIT (post_dominators[e->src->sindex], e->dest->sindex))
+ && TEST_BIT (dominators[e->dest->index], e->src->index)
+ && !TEST_BIT (post_dominators[e->src->index], e->dest->index))
{
rtx insn;
@@ -603,11 +604,11 @@ estimate_probability (loops_info)
}
/* Attach the combined probability to each conditional jump. */
- FOR_ALL_BB (bb)
- if (GET_CODE (bb->end) == JUMP_INSN
- && any_condjump_p (bb->end)
- && bb->succ->succ_next != NULL)
- combine_predictions_for_insn (bb->end, bb);
+ for (i = 0; i < n_basic_blocks; i++)
+ if (GET_CODE (BLOCK_END (i)) == JUMP_INSN
+ && any_condjump_p (BLOCK_END (i))
+ && BASIC_BLOCK (i)->succ->succ_next != NULL)
+ combine_predictions_for_insn (BLOCK_END (i), BASIC_BLOCK (i));
sbitmap_vector_free (post_dominators);
sbitmap_vector_free (dominators);
@@ -694,16 +695,13 @@ static bool
last_basic_block_p (bb)
basic_block bb;
{
- if (bb == EXIT_BLOCK_PTR)
- return false;
-
- return (bb->next_bb == EXIT_BLOCK_PTR
- || (bb->next_bb->next_bb == EXIT_BLOCK_PTR
+ return (bb->index == n_basic_blocks - 1
+ || (bb->index == n_basic_blocks - 2
&& bb->succ && !bb->succ->succ_next
- && bb->succ->dest->next_bb == EXIT_BLOCK_PTR));
+ && bb->succ->dest->index == n_basic_blocks - 1));
}
-/* Sets branch probabilities according to PREDiction and FLAGS. HEADS[bb->sindex]
+/* Sets branch probabilities according to PREDiction and FLAGS. HEADS[bb->index]
should be index of basic block in that we need to alter branch predictions
(i.e. the first of our dominators such that we do not post-dominate it)
(but we fill this information on demand, so -1 may be there in case this
@@ -724,43 +722,43 @@ process_note_prediction (bb, heads, dominators, post_dominators, pred, flags)
taken = flags & IS_TAKEN;
- if (heads[bb->sindex] < 0)
+ if (heads[bb->index] < 0)
{
/* This is first time we need this field in heads array; so
find first dominator that we do not post-dominate (we are
using already known members of heads array). */
- int ai = bb->sindex;
- int next_ai = dominators[bb->sindex];
+ int ai = bb->index;
+ int next_ai = dominators[bb->index];
int head;
while (heads[next_ai] < 0)
{
- if (!TEST_BIT (post_dominators[next_ai], bb->sindex))
+ if (!TEST_BIT (post_dominators[next_ai], bb->index))
break;
heads[next_ai] = ai;
ai = next_ai;
next_ai = dominators[next_ai];
}
- if (!TEST_BIT (post_dominators[next_ai], bb->sindex))
+ if (!TEST_BIT (post_dominators[next_ai], bb->index))
head = next_ai;
else
head = heads[next_ai];
- while (next_ai != bb->sindex)
+ while (next_ai != bb->index)
{
next_ai = ai;
ai = heads[ai];
heads[next_ai] = head;
}
}
- y = heads[bb->sindex];
+ y = heads[bb->index];
/* Now find the edge that leads to our branch and aply the prediction. */
- if (y == last_basic_block)
+ if (y == n_basic_blocks)
return;
for (e = BASIC_BLOCK (y)->succ; e; e = e->succ_next)
- if (e->dest->sindex >= 0
- && TEST_BIT (post_dominators[e->dest->sindex], bb->sindex))
+ if (e->dest->index >= 0
+ && TEST_BIT (post_dominators[e->dest->index], bb->index))
predict_edge_def (e, pred, taken);
}
@@ -833,7 +831,7 @@ process_note_predictions (bb, heads, dominators, post_dominators)
void
note_prediction_to_br_prob ()
{
- basic_block bb;
+ int i;
sbitmap *post_dominators;
int *dominators, *heads;
@@ -841,20 +839,23 @@ note_prediction_to_br_prob ()
add_noreturn_fake_exit_edges ();
connect_infinite_loops_to_exit ();
- dominators = xmalloc (sizeof (int) * last_basic_block);
- memset (dominators, -1, sizeof (int) * last_basic_block);
- post_dominators = sbitmap_vector_alloc (last_basic_block, last_basic_block);
+ dominators = xmalloc (sizeof (int) * n_basic_blocks);
+ memset (dominators, -1, sizeof (int) * n_basic_blocks);
+ post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
calculate_dominance_info (NULL, post_dominators, CDI_POST_DOMINATORS);
calculate_dominance_info (dominators, NULL, CDI_DOMINATORS);
- heads = xmalloc (sizeof (int) * last_basic_block);
- memset (heads, -1, sizeof (int) * last_basic_block);
- heads[ENTRY_BLOCK_PTR->next_bb->sindex] = last_basic_block;
+ heads = xmalloc (sizeof (int) * n_basic_blocks);
+ memset (heads, -1, sizeof (int) * n_basic_blocks);
+ heads[0] = n_basic_blocks;
/* Process all prediction notes. */
- FOR_ALL_BB (bb)
- process_note_predictions (bb, heads, dominators, post_dominators);
+ for (i = 0; i < n_basic_blocks; ++i)
+ {
+ basic_block bb = BASIC_BLOCK (i);
+ process_note_predictions (bb, heads, dominators, post_dominators);
+ }
sbitmap_vector_free (post_dominators);
free (dominators);
@@ -902,15 +903,17 @@ static void
propagate_freq (head)
basic_block head;
{
- basic_block bb;
- basic_block last;
+ basic_block bb = head;
+ basic_block last = bb;
edge e;
basic_block nextbb;
+ int n;
/* For each basic block we need to visit count number of his predecessors
we need to visit first. */
- FOR_ALL_BB (bb)
+ for (n = 0; n < n_basic_blocks; n++)
{
+ basic_block bb = BASIC_BLOCK (n);
if (BLOCK_INFO (bb)->tovisit)
{
int count = 0;
@@ -922,14 +925,13 @@ propagate_freq (head)
&& rtl_dump_file && !EDGE_INFO (e)->back_edge)
fprintf (rtl_dump_file,
"Irreducible region hit, ignoring edge to %i->%i\n",
- e->src->sindex, bb->sindex);
+ e->src->index, bb->index);
BLOCK_INFO (bb)->npredecessors = count;
}
}
memcpy (&BLOCK_INFO (head)->frequency, &real_one, sizeof (real_one));
- last = head;
- for (bb = head; bb; bb = nextbb)
+ for (; bb; bb = nextbb)
{
REAL_VALUE_TYPE cyclic_probability, frequency;
@@ -1072,13 +1074,24 @@ static void
counts_to_freqs ()
{
HOST_WIDEST_INT count_max = 1;
- basic_block bb;
+ int i;
+
+ for (i = 0; i < n_basic_blocks; i++)
+ count_max = MAX (BASIC_BLOCK (i)->count, count_max);
- FOR_ALL_BB (bb)
- count_max = MAX (bb->count, count_max);
+ for (i = -2; i < n_basic_blocks; i++)
+ {
+ basic_block bb;
+
+ if (i == -2)
+ bb = ENTRY_BLOCK_PTR;
+ else if (i == -1)
+ bb = EXIT_BLOCK_PTR;
+ else
+ bb = BASIC_BLOCK (i);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
+ bb->frequency = (bb->count * BB_FREQ_MAX + count_max / 2) / count_max;
+ }
}
/* Return true if function is likely to be expensive, so there is no point to
@@ -1091,7 +1104,7 @@ expensive_function_p (threshold)
int threshold;
{
unsigned int sum = 0;
- basic_block bb;
+ int i;
unsigned int limit;
/* We can not compute accurately for large thresholds due to scaled
@@ -1107,8 +1120,9 @@ expensive_function_p (threshold)
/* Maximally BB_FREQ_MAX^2 so overflow won't happen. */
limit = ENTRY_BLOCK_PTR->frequency * threshold;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx insn;
for (insn = bb->head; insn != NEXT_INSN (bb->end);
@@ -1130,7 +1144,7 @@ static void
estimate_bb_frequencies (loops)
struct loops *loops;
{
- basic_block bb;
+ int i;
REAL_VALUE_TYPE freq_max;
enum machine_mode double_mode = TYPE_MODE (double_type_node);
@@ -1152,13 +1166,13 @@ estimate_bb_frequencies (loops)
mark_dfs_back_edges ();
/* Fill in the probability values in flowgraph based on the REG_BR_PROB
notes. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
- rtx last_insn = bb->end;
+ rtx last_insn = BLOCK_END (i);
if (GET_CODE (last_insn) != JUMP_INSN || !any_condjump_p (last_insn)
/* Avoid handling of conditional jumps jumping to fallthru edge. */
- || bb->succ->succ_next == NULL)
+ || BASIC_BLOCK (i)->succ->succ_next == NULL)
{
/* We can predict only conditional jumps at the moment.
Expect each edge to be equally probable.
@@ -1166,14 +1180,14 @@ estimate_bb_frequencies (loops)
int nedges = 0;
edge e;
- for (e = bb->succ; e; e = e->succ_next)
+ for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
{
nedges++;
if (e->probability != 0)
break;
}
if (!e)
- for (e = bb->succ; e; e = e->succ_next)
+ for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
e->probability = (REG_BR_PROB_BASE + nedges / 2) / nedges;
}
}
@@ -1183,10 +1197,17 @@ estimate_bb_frequencies (loops)
/* Set up block info for each basic block. */
alloc_aux_for_blocks (sizeof (struct block_info_def));
alloc_aux_for_edges (sizeof (struct edge_info_def));
-
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ for (i = -2; i < n_basic_blocks; i++)
{
edge e;
+ basic_block bb;
+
+ if (i == -2)
+ bb = ENTRY_BLOCK_PTR;
+ else if (i == -1)
+ bb = EXIT_BLOCK_PTR;
+ else
+ bb = BASIC_BLOCK (i);
BLOCK_INFO (bb)->tovisit = 0;
for (e = bb->succ; e; e = e->succ_next)
@@ -1205,22 +1226,32 @@ estimate_bb_frequencies (loops)
estimate_loops_at_level (loops->tree_root);
/* Now fake loop around whole function to finalize probabilities. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
- BLOCK_INFO (bb)->tovisit = 1;
+ for (i = 0; i < n_basic_blocks; i++)
+ BLOCK_INFO (BASIC_BLOCK (i))->tovisit = 1;
+ BLOCK_INFO (ENTRY_BLOCK_PTR)->tovisit = 1;
+ BLOCK_INFO (EXIT_BLOCK_PTR)->tovisit = 1;
propagate_freq (ENTRY_BLOCK_PTR);
memcpy (&freq_max, &real_zero, sizeof (real_zero));
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
if (REAL_VALUES_LESS
- (freq_max, BLOCK_INFO (bb)->frequency))
- memcpy (&freq_max, &BLOCK_INFO (bb)->frequency,
+ (freq_max, BLOCK_INFO (BASIC_BLOCK (i))->frequency))
+ memcpy (&freq_max, &BLOCK_INFO (BASIC_BLOCK (i))->frequency,
sizeof (freq_max));
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ for (i = -2; i < n_basic_blocks; i++)
{
+ basic_block bb;
REAL_VALUE_TYPE tmp;
+ if (i == -2)
+ bb = ENTRY_BLOCK_PTR;
+ else if (i == -1)
+ bb = EXIT_BLOCK_PTR;
+ else
+ bb = BASIC_BLOCK (i);
+
REAL_ARITHMETIC (tmp, MULT_EXPR, BLOCK_INFO (bb)->frequency,
real_bb_freq_max);
REAL_ARITHMETIC (tmp, RDIV_EXPR, tmp, freq_max);
@@ -1240,14 +1271,14 @@ estimate_bb_frequencies (loops)
static void
compute_function_frequency ()
{
- basic_block bb;
-
+ int i;
if (!profile_info.count_profiles_merged
|| !flag_branch_probabilities)
return;
cfun->function_frequency = FUNCTION_FREQUENCY_UNLIKELY_EXECUTED;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
if (maybe_hot_bb_p (bb))
{
cfun->function_frequency = FUNCTION_FREQUENCY_HOT;
diff --git a/gcc/print-rtl.c b/gcc/print-rtl.c
index c0d2d9b..8cd339a 100644
--- a/gcc/print-rtl.c
+++ b/gcc/print-rtl.c
@@ -265,7 +265,7 @@ print_rtx (in_rtx)
{
basic_block bb = NOTE_BASIC_BLOCK (in_rtx);
if (bb != 0)
- fprintf (outfile, " [bb %d]", bb->sindex);
+ fprintf (outfile, " [bb %d]", bb->index);
break;
}
diff --git a/gcc/profile.c b/gcc/profile.c
index 73dbd0b..60159a3 100644
--- a/gcc/profile.c
+++ b/gcc/profile.c
@@ -73,11 +73,11 @@ struct bb_info
/* Keep all basic block indexes nonnegative in the gcov output. Index 0
is used for entry block, last block exit block. */
#define GCOV_INDEX_TO_BB(i) ((i) == 0 ? ENTRY_BLOCK_PTR \
- : (((i) == last_basic_block + 1) \
+ : (((i) == n_basic_blocks + 1) \
? EXIT_BLOCK_PTR : BASIC_BLOCK ((i)-1)))
#define BB_TO_GCOV_INDEX(bb) ((bb) == ENTRY_BLOCK_PTR ? 0 \
: ((bb) == EXIT_BLOCK_PTR \
- ? last_basic_block + 1 : (bb)->sindex + 1))
+ ? n_basic_blocks + 1 : (bb)->index + 1))
/* Instantiate the profile info structure. */
@@ -137,13 +137,14 @@ static void
instrument_edges (el)
struct edge_list *el;
{
+ int i;
int num_instr_edges = 0;
int num_edges = NUM_EDGES (el);
- basic_block bb;
remove_fake_edges ();
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ for (i = 0; i < n_basic_blocks + 2; i++)
{
+ basic_block bb = GCOV_INDEX_TO_BB (i);
edge e = bb->succ;
while (e)
{
@@ -154,7 +155,7 @@ instrument_edges (el)
abort ();
if (rtl_dump_file)
fprintf (rtl_dump_file, "Edge %d to %d instrumented%s\n",
- e->src->sindex, e->dest->sindex,
+ e->src->index, e->dest->index,
EDGE_CRITICAL_P (e) ? " (and split)" : "");
need_func_profiler = 1;
insert_insn_on_edge (
@@ -215,8 +216,8 @@ static gcov_type *
get_exec_counts ()
{
int num_edges = 0;
- basic_block bb;
- int okay = 1, j;
+ int i;
+ int okay = 1;
int mismatch = 0;
gcov_type *profile;
char *function_name_buffer;
@@ -232,12 +233,15 @@ get_exec_counts ()
/* Count the edges to be (possibly) instrumented. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ for (i = 0; i < n_basic_blocks + 2; i++)
{
+ basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
for (e = bb->succ; e; e = e->succ_next)
if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
- num_edges++;
+ {
+ num_edges++;
+ }
}
/* now read and combine all matching profiles. */
@@ -247,8 +251,8 @@ get_exec_counts ()
function_name_buffer_len = strlen (current_function_name) + 1;
function_name_buffer = xmalloc (function_name_buffer_len + 1);
- for (j = 0; j < num_edges; j++)
- profile[j] = 0;
+ for (i = 0; i < num_edges; i++)
+ profile[i] = 0;
while (1)
{
@@ -372,8 +376,8 @@ get_exec_counts ()
static void
compute_branch_probabilities ()
{
- basic_block bb;
- int num_edges = 0, i;
+ int i;
+ int num_edges = 0;
int changes;
int passes;
int hist_br_prob[20];
@@ -385,8 +389,9 @@ compute_branch_probabilities ()
/* Attach extra info block to each bb. */
alloc_aux_for_blocks (sizeof (struct bb_info));
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ for (i = 0; i < n_basic_blocks + 2; i++)
{
+ basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
for (e = bb->succ; e; e = e->succ_next)
@@ -407,8 +412,9 @@ compute_branch_probabilities ()
/* The first count in the .da file is the number of times that the function
was entered. This is the exec_count for block zero. */
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ for (i = 0; i < n_basic_blocks + 2; i++)
{
+ basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
for (e = bb->succ; e; e = e->succ_next)
if (!EDGE_INFO (e)->ignore && !EDGE_INFO (e)->on_tree)
@@ -427,7 +433,7 @@ compute_branch_probabilities ()
if (rtl_dump_file)
{
fprintf (rtl_dump_file, "\nRead edge from %i to %i, count:",
- bb->sindex, e->dest->sindex);
+ bb->index, e->dest->index);
fprintf (rtl_dump_file, HOST_WIDEST_INT_PRINT_DEC,
(HOST_WIDEST_INT) e->count);
}
@@ -460,8 +466,9 @@ compute_branch_probabilities ()
{
passes++;
changes = 0;
- FOR_BB_BETWEEN (bb, EXIT_BLOCK_PTR, NULL, prev_bb)
+ for (i = n_basic_blocks + 1; i >= 0; i--)
{
+ basic_block bb = GCOV_INDEX_TO_BB (i);
struct bb_info *bi = BB_INFO (bb);
if (! bi->count_valid)
{
@@ -556,8 +563,9 @@ compute_branch_probabilities ()
/* If the graph has been correctly solved, every block will have a
succ and pred count of zero. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
if (BB_INFO (bb)->succ_count || BB_INFO (bb)->pred_count)
abort ();
}
@@ -570,8 +578,9 @@ compute_branch_probabilities ()
num_never_executed = 0;
num_branches = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ for (i = 0; i <= n_basic_blocks + 1; i++)
{
+ basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
gcov_type total;
rtx note;
@@ -585,11 +594,11 @@ compute_branch_probabilities ()
if (e->probability < 0 || e->probability > REG_BR_PROB_BASE)
{
error ("corrupted profile info: prob for %d-%d thought to be %d",
- e->src->sindex, e->dest->sindex, e->probability);
+ e->src->index, e->dest->index, e->probability);
e->probability = REG_BR_PROB_BASE / 2;
}
}
- if (bb->sindex >= 0
+ if (bb->index >= 0
&& any_condjump_p (bb->end)
&& bb->succ->succ_next)
{
@@ -646,7 +655,7 @@ compute_branch_probabilities ()
for (e = bb->succ; e; e = e->succ_next)
e->probability = REG_BR_PROB_BASE / total;
}
- if (bb->sindex >= 0
+ if (bb->index >= 0
&& any_condjump_p (bb->end)
&& bb->succ->succ_next)
num_branches++, num_never_executed;
@@ -687,10 +696,12 @@ static long
compute_checksum ()
{
long chsum = 0;
- basic_block bb;
+ int i;
- FOR_ALL_BB (bb)
+
+ for (i = 0; i < n_basic_blocks ; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
edge e;
for (e = bb->succ; e; e = e->succ_next)
@@ -723,7 +734,6 @@ compute_checksum ()
void
branch_prob ()
{
- basic_block bb;
int i;
int num_edges, ignored_edges;
struct edge_list *el;
@@ -752,10 +762,11 @@ branch_prob ()
We also add fake exit edges for each call and asm statement in the
basic, since it may not return. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks ; i++)
{
int need_exit_edge = 0, need_entry_edge = 0;
int have_exit_edge = 0, have_entry_edge = 0;
+ basic_block bb = BASIC_BLOCK (i);
rtx insn;
edge e;
@@ -780,7 +791,7 @@ branch_prob ()
{
/* We should not get abort here, as call to setjmp should not
be the very first instruction of function. */
- if (bb == ENTRY_BLOCK_PTR)
+ if (!i)
abort ();
make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE);
}
@@ -808,14 +819,14 @@ branch_prob ()
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Adding fake exit edge to bb %i\n",
- bb->sindex);
+ bb->index);
make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
}
if (need_entry_edge && !have_entry_edge)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Adding fake entry edge to bb %i\n",
- bb->sindex);
+ bb->index);
make_edge (ENTRY_BLOCK_PTR, bb, EDGE_FAKE);
}
}
@@ -847,10 +858,10 @@ branch_prob ()
GCOV utility. */
if (flag_test_coverage)
{
- basic_block bb;
-
- FOR_ALL_BB (bb)
+ int i = 0;
+ for (i = 0 ; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx insn = bb->head;
static int ignore_next_note = 0;
@@ -928,9 +939,9 @@ branch_prob ()
}
}
- total_num_blocks += num_basic_blocks + 2;
+ total_num_blocks += n_basic_blocks + 2;
if (rtl_dump_file)
- fprintf (rtl_dump_file, "%d basic blocks\n", num_basic_blocks);
+ fprintf (rtl_dump_file, "%d basic blocks\n", n_basic_blocks);
total_num_edges += num_edges;
if (rtl_dump_file)
@@ -956,11 +967,12 @@ branch_prob ()
__write_long (profile_info.current_function_cfg_checksum, bbg_file, 4);
/* The plus 2 stands for entry and exit block. */
- __write_long (num_basic_blocks + 2, bbg_file, 4);
+ __write_long (n_basic_blocks + 2, bbg_file, 4);
__write_long (num_edges - ignored_edges + 1, bbg_file, 4);
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR, NULL, next_bb)
+ for (i = 0; i < n_basic_blocks + 1; i++)
{
+ basic_block bb = GCOV_INDEX_TO_BB (i);
edge e;
long count = 0;
@@ -1069,14 +1081,13 @@ find_spanning_tree (el)
struct edge_list *el;
{
int i;
- basic_block bb;
int num_edges = NUM_EDGES (el);
/* We use aux field for standard union-find algorithm. */
EXIT_BLOCK_PTR->aux = EXIT_BLOCK_PTR;
ENTRY_BLOCK_PTR->aux = ENTRY_BLOCK_PTR;
- FOR_ALL_BB (bb)
- bb->aux = bb;
+ for (i = 0; i < n_basic_blocks; i++)
+ BASIC_BLOCK (i)->aux = BASIC_BLOCK (i);
/* Add fake edge exit to entry we can't instrument. */
union_groups (EXIT_BLOCK_PTR, ENTRY_BLOCK_PTR);
@@ -1095,7 +1106,7 @@ find_spanning_tree (el)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Abnormal edge %d to %d put to tree\n",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
EDGE_INFO (e)->on_tree = 1;
union_groups (e->src, e->dest);
}
@@ -1111,7 +1122,7 @@ find_spanning_tree (el)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Critical edge %d to %d put to tree\n",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
EDGE_INFO (e)->on_tree = 1;
union_groups (e->src, e->dest);
}
@@ -1126,7 +1137,7 @@ find_spanning_tree (el)
{
if (rtl_dump_file)
fprintf (rtl_dump_file, "Normal edge %d to %d put to tree\n",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
EDGE_INFO (e)->on_tree = 1;
union_groups (e->src, e->dest);
}
@@ -1134,8 +1145,8 @@ find_spanning_tree (el)
EXIT_BLOCK_PTR->aux = NULL;
ENTRY_BLOCK_PTR->aux = NULL;
- FOR_ALL_BB (bb)
- bb->aux = NULL;
+ for (i = 0; i < n_basic_blocks; i++)
+ BASIC_BLOCK (i)->aux = NULL;
}
/* Perform file-level initialization for branch-prob processing. */
diff --git a/gcc/recog.c b/gcc/recog.c
index adfef3b9..c3dbee2 100644
--- a/gcc/recog.c
+++ b/gcc/recog.c
@@ -2727,14 +2727,15 @@ split_all_insns (upd_life)
{
sbitmap blocks;
int changed;
- basic_block bb;
+ int i;
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (blocks);
changed = 0;
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx insn, next;
bool finish = false;
@@ -2755,7 +2756,7 @@ split_all_insns (upd_life)
while (GET_CODE (last) == BARRIER)
last = PREV_INSN (last);
- SET_BIT (blocks, bb->sindex);
+ SET_BIT (blocks, i);
changed = 1;
insn = last;
}
@@ -2998,8 +2999,7 @@ peephole2_optimize (dump_file)
regset_head rs_heads[MAX_INSNS_PER_PEEP2 + 2];
rtx insn, prev;
regset live;
- int i;
- basic_block bb;
+ int i, b;
#ifdef HAVE_conditional_execution
sbitmap blocks;
bool changed;
@@ -3013,15 +3013,16 @@ peephole2_optimize (dump_file)
live = INITIALIZE_REG_SET (rs_heads[i]);
#ifdef HAVE_conditional_execution
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (blocks);
changed = false;
#else
count_or_remove_death_notes (NULL, 1);
#endif
- FOR_ALL_BB_REVERSE (bb)
+ for (b = n_basic_blocks - 1; b >= 0; --b)
{
+ basic_block bb = BASIC_BLOCK (b);
struct propagate_block_info *pbi;
/* Indicate that all slots except the last holds invalid data. */
diff --git a/gcc/reg-stack.c b/gcc/reg-stack.c
index 02fef8f..3e1a756 100644
--- a/gcc/reg-stack.c
+++ b/gcc/reg-stack.c
@@ -418,8 +418,8 @@ reg_to_stack (first, file)
rtx first;
FILE *file;
{
- basic_block bb;
- int max_uid, i;
+ int i;
+ int max_uid;
/* Clean up previous run. */
if (stack_regs_mentioned_data)
@@ -451,9 +451,10 @@ reg_to_stack (first, file)
/* Set up block info for each basic block. */
alloc_aux_for_blocks (sizeof (struct block_info_def));
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
edge e;
+ basic_block bb = BASIC_BLOCK (i);
for (e = bb->pred; e; e=e->pred_next)
if (!(e->flags & EDGE_DFS_BACK)
&& e->src != ENTRY_BLOCK_PTR)
@@ -2381,12 +2382,12 @@ print_stack (file, s)
static int
convert_regs_entry ()
{
- int inserted = 0;
+ int inserted = 0, i;
edge e;
- basic_block block;
- FOR_ALL_BB_REVERSE (block)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
+ basic_block block = BASIC_BLOCK (i);
block_info bi = BLOCK_INFO (block);
int reg;
@@ -2490,7 +2491,7 @@ compensate_edge (e, file)
current_block = block;
regstack = bi->stack_out;
if (file)
- fprintf (file, "Edge %d->%d: ", block->sindex, target->sindex);
+ fprintf (file, "Edge %d->%d: ", block->index, target->index);
if (target_stack->top == -2)
{
@@ -2650,7 +2651,7 @@ convert_regs_1 (file, block)
if (EDGE_CRITICAL_P (e))
beste = e;
}
- else if (e->src->sindex < beste->src->sindex)
+ else if (e->src->index < beste->src->index)
beste = e;
}
@@ -2664,7 +2665,7 @@ convert_regs_1 (file, block)
if (file)
{
- fprintf (file, "\nBasic block %d\nInput stack: ", block->sindex);
+ fprintf (file, "\nBasic block %d\nInput stack: ", block->index);
print_stack (file, &bi->stack_in);
}
@@ -2779,7 +2780,7 @@ convert_regs_2 (file, block)
basic_block *stack, *sp;
int inserted;
- stack = (basic_block *) xmalloc (sizeof (*stack) * num_basic_blocks);
+ stack = (basic_block *) xmalloc (sizeof (*stack) * n_basic_blocks);
sp = stack;
*sp++ = block;
@@ -2814,8 +2815,7 @@ static int
convert_regs (file)
FILE *file;
{
- int inserted;
- basic_block b;
+ int inserted, i;
edge e;
/* Initialize uninitialized registers on function entry. */
@@ -2835,8 +2835,9 @@ convert_regs (file)
/* ??? Process all unreachable blocks. Though there's no excuse
for keeping these even when not optimizing. */
- FOR_ALL_BB (b)
+ for (i = 0; i < n_basic_blocks; ++i)
{
+ basic_block b = BASIC_BLOCK (i);
block_info bi = BLOCK_INFO (b);
if (! bi->done)
diff --git a/gcc/regclass.c b/gcc/regclass.c
index 668d92d..decab26 100644
--- a/gcc/regclass.c
+++ b/gcc/regclass.c
@@ -1127,10 +1127,10 @@ scan_one_insn (insn, pass)
INSN could not be at the beginning of that block. */
if (previnsn == 0 || GET_CODE (previnsn) == JUMP_INSN)
{
- basic_block b;
- FOR_ALL_BB (b)
- if (insn == b->head)
- b->head = newinsn;
+ int b;
+ for (b = 0; b < n_basic_blocks; b++)
+ if (insn == BLOCK_HEAD (b))
+ BLOCK_HEAD (b) = newinsn;
}
/* This makes one more setting of new insns's dest. */
@@ -1255,7 +1255,7 @@ regclass (f, nregs, dump)
for (pass = 0; pass <= flag_expensive_optimizations; pass++)
{
- basic_block bb;
+ int index;
if (dump)
fprintf (dump, "\n\nPass %i\n\n",pass);
@@ -1277,8 +1277,9 @@ regclass (f, nregs, dump)
insn = scan_one_insn (insn, pass);
}
else
- FOR_ALL_BB (bb)
+ for (index = 0; index < n_basic_blocks; index++)
{
+ basic_block bb = BASIC_BLOCK (index);
/* Show that an insn inside a loop is likely to be executed three
times more than insns outside a loop. This is much more
diff --git a/gcc/regmove.c b/gcc/regmove.c
index 4188ce6..7b073f2 100644
--- a/gcc/regmove.c
+++ b/gcc/regmove.c
@@ -223,7 +223,7 @@ mark_flags_life_zones (flags)
{
int flags_regno;
int flags_nregs;
- basic_block block;
+ int block;
#ifdef HAVE_cc0
/* If we found a flags register on a cc0 host, bail. */
@@ -254,13 +254,13 @@ mark_flags_life_zones (flags)
flags_set_1_rtx = flags;
/* Process each basic block. */
- FOR_ALL_BB_REVERSE (block)
+ for (block = n_basic_blocks - 1; block >= 0; block--)
{
rtx insn, end;
int live;
- insn = block->head;
- end = block->end;
+ insn = BLOCK_HEAD (block);
+ end = BLOCK_END (block);
/* Look out for the (unlikely) case of flags being live across
basic block boundaries. */
@@ -269,7 +269,7 @@ mark_flags_life_zones (flags)
{
int i;
for (i = 0; i < flags_nregs; ++i)
- live |= REGNO_REG_SET_P (block->global_live_at_start,
+ live |= REGNO_REG_SET_P (BASIC_BLOCK (block)->global_live_at_start,
flags_regno + i);
}
#endif
@@ -1061,7 +1061,6 @@ regmove_optimize (f, nregs, regmove_dump_file)
int pass;
int i;
rtx copy_src, copy_dst;
- basic_block bb;
/* ??? Hack. Regmove doesn't examine the CFG, and gets mightily
confused by non-call exceptions ending blocks. */
@@ -1077,8 +1076,8 @@ regmove_optimize (f, nregs, regmove_dump_file)
regmove_bb_head = (int *) xmalloc (sizeof (int) * (old_max_uid + 1));
for (i = old_max_uid; i >= 0; i--) regmove_bb_head[i] = -1;
- FOR_ALL_BB (bb)
- regmove_bb_head[INSN_UID (bb->head)] = bb->sindex;
+ for (i = 0; i < n_basic_blocks; i++)
+ regmove_bb_head[INSN_UID (BLOCK_HEAD (i))] = i;
/* A forward/backward pass. Replace output operands with input operands. */
@@ -1505,15 +1504,15 @@ regmove_optimize (f, nregs, regmove_dump_file)
/* In fixup_match_1, some insns may have been inserted after basic block
ends. Fix that here. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
- rtx end = bb->end;
+ rtx end = BLOCK_END (i);
rtx new = end;
rtx next = NEXT_INSN (new);
while (next != 0 && INSN_UID (next) >= old_max_uid
- && (bb->next_bb == EXIT_BLOCK_PTR || bb->next_bb->head != next))
+ && (i == n_basic_blocks - 1 || BLOCK_HEAD (i + 1) != next))
new = next, next = NEXT_INSN (new);
- bb->end = new;
+ BLOCK_END (i) = new;
}
done:
@@ -2139,10 +2138,10 @@ static int record_stack_memrefs PARAMS ((rtx *, void *));
void
combine_stack_adjustments ()
{
- basic_block bb;
+ int i;
- FOR_ALL_BB (bb)
- combine_stack_adjustments_for_block (bb);
+ for (i = 0; i < n_basic_blocks; ++i)
+ combine_stack_adjustments_for_block (BASIC_BLOCK (i));
}
/* Recognize a MEM of the form (sp) or (plus sp const). */
diff --git a/gcc/regrename.c b/gcc/regrename.c
index 421a7bb5..4297da7 100644
--- a/gcc/regrename.c
+++ b/gcc/regrename.c
@@ -201,7 +201,7 @@ regrename_optimize ()
{
int tick[FIRST_PSEUDO_REGISTER];
int this_tick = 0;
- basic_block bb;
+ int b;
char *first_obj;
memset (tick, 0, sizeof tick);
@@ -209,8 +209,9 @@ regrename_optimize ()
gcc_obstack_init (&rename_obstack);
first_obj = (char *) obstack_alloc (&rename_obstack, 0);
- FOR_ALL_BB (bb)
+ for (b = 0; b < n_basic_blocks; b++)
{
+ basic_block bb = BASIC_BLOCK (b);
struct du_chain *all_chains = 0;
HARD_REG_SET unavailable;
HARD_REG_SET regs_seen;
@@ -218,7 +219,7 @@ regrename_optimize ()
CLEAR_HARD_REG_SET (unavailable);
if (rtl_dump_file)
- fprintf (rtl_dump_file, "\nBasic block %d:\n", bb->sindex);
+ fprintf (rtl_dump_file, "\nBasic block %d:\n", b);
all_chains = build_def_use (bb);
@@ -1725,30 +1726,30 @@ copyprop_hardreg_forward ()
{
struct value_data *all_vd;
bool need_refresh;
- basic_block bb, bbp;
+ int b;
need_refresh = false;
- all_vd = xmalloc (sizeof (struct value_data) * last_basic_block);
+ all_vd = xmalloc (sizeof (struct value_data) * n_basic_blocks);
- FOR_ALL_BB (bb)
+ for (b = 0; b < n_basic_blocks; b++)
{
+ basic_block bb = BASIC_BLOCK (b);
+
/* If a block has a single predecessor, that we've already
processed, begin with the value data that was live at
the end of the predecessor block. */
/* ??? Ought to use more intelligent queueing of blocks. */
- if (bb->pred)
- for (bbp = bb; bbp && bbp != bb->pred->src; bbp = bbp->prev_bb);
if (bb->pred
&& ! bb->pred->pred_next
&& ! (bb->pred->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
- && bb->pred->src != ENTRY_BLOCK_PTR
- && bbp)
- all_vd[bb->sindex] = all_vd[bb->pred->src->sindex];
+ && bb->pred->src->index != ENTRY_BLOCK
+ && bb->pred->src->index < b)
+ all_vd[b] = all_vd[bb->pred->src->index];
else
- init_value_data (all_vd + bb->sindex);
+ init_value_data (all_vd + b);
- if (copyprop_hardreg_forward_1 (bb, all_vd + bb->sindex))
+ if (copyprop_hardreg_forward_1 (bb, all_vd + b))
need_refresh = true;
}
diff --git a/gcc/reload1.c b/gcc/reload1.c
index ecfe078..1349c3c 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -676,7 +676,6 @@ reload (first, global)
int i;
rtx insn;
struct elim_table *ep;
- basic_block bb;
/* The two pointers used to track the true location of the memory used
for label offsets. */
@@ -1124,8 +1123,8 @@ reload (first, global)
pseudo. */
if (! frame_pointer_needed)
- FOR_ALL_BB (bb)
- CLEAR_REGNO_REG_SET (bb->global_live_at_start,
+ for (i = 0; i < n_basic_blocks; i++)
+ CLEAR_REGNO_REG_SET (BASIC_BLOCK (i)->global_live_at_start,
HARD_FRAME_POINTER_REGNUM);
/* Come here (with failure set nonzero) if we can't get enough spill regs
@@ -8613,7 +8612,6 @@ reload_combine ()
int first_index_reg = -1;
int last_index_reg = 0;
int i;
- basic_block bb;
unsigned int r;
int last_label_ruid;
int min_labelno, n_labels;
@@ -8649,17 +8647,17 @@ reload_combine ()
label_live = (HARD_REG_SET *) xmalloc (n_labels * sizeof (HARD_REG_SET));
CLEAR_HARD_REG_SET (ever_live_at_start);
- FOR_ALL_BB_REVERSE (bb)
+ for (i = n_basic_blocks - 1; i >= 0; i--)
{
- insn = bb->head;
+ insn = BLOCK_HEAD (i);
if (GET_CODE (insn) == CODE_LABEL)
{
HARD_REG_SET live;
REG_SET_TO_HARD_REG_SET (live,
- bb->global_live_at_start);
+ BASIC_BLOCK (i)->global_live_at_start);
compute_use_by_pseudos (&live,
- bb->global_live_at_start);
+ BASIC_BLOCK (i)->global_live_at_start);
COPY_HARD_REG_SET (LABEL_LIVE (insn), live);
IOR_HARD_REG_SET (ever_live_at_start, live);
}
@@ -9490,11 +9488,12 @@ copy_eh_notes (insn, x)
void
fixup_abnormal_edges ()
{
+ int i;
bool inserted = false;
- basic_block bb;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
edge e;
/* Look for cases we are interested in - an calls or instructions causing
diff --git a/gcc/reorg.c b/gcc/reorg.c
index 375687d..8861dfe 100644
--- a/gcc/reorg.c
+++ b/gcc/reorg.c
@@ -3601,7 +3601,7 @@ dbr_schedule (first, file)
/* If the current function has no insns other than the prologue and
epilogue, then do not try to fill any delay slots. */
- if (num_basic_blocks == 0)
+ if (n_basic_blocks == 0)
return;
/* Find the highest INSN_UID and allocate and initialize our map from
diff --git a/gcc/resource.c b/gcc/resource.c
index 04c6cb7..6c20517 100644
--- a/gcc/resource.c
+++ b/gcc/resource.c
@@ -133,7 +133,7 @@ find_basic_block (insn, search_limit)
rtx insn;
int search_limit;
{
- basic_block bb;
+ int i;
/* Scan backwards to the previous BARRIER. Then see if we can find a
label that starts a basic block. Return the basic block number. */
@@ -156,9 +156,9 @@ find_basic_block (insn, search_limit)
insn && GET_CODE (insn) == CODE_LABEL;
insn = next_nonnote_insn (insn))
{
- FOR_ALL_BB (bb)
- if (insn == bb->head)
- return bb->sindex;
+ for (i = 0; i < n_basic_blocks; i++)
+ if (insn == BLOCK_HEAD (i))
+ return i;
}
return -1;
@@ -1240,7 +1240,7 @@ init_resource_info (epilogue_insn)
/* Allocate and initialize the tables used by mark_target_live_regs. */
target_hash_table = (struct target_info **)
xcalloc (TARGET_HASH_PRIME, sizeof (struct target_info *));
- bb_ticks = (int *) xcalloc (last_basic_block, sizeof (int));
+ bb_ticks = (int *) xcalloc (n_basic_blocks, sizeof (int));
}
/* Free up the resources allcated to mark_target_live_regs (). This
diff --git a/gcc/sbitmap.c b/gcc/sbitmap.c
index c044ae4..e581000 100644
--- a/gcc/sbitmap.c
+++ b/gcc/sbitmap.c
@@ -446,7 +446,7 @@ sbitmap_intersection_of_succs (dst, src, bb)
if (e->dest == EXIT_BLOCK_PTR)
continue;
- sbitmap_copy (dst, src[e->dest->sindex]);
+ sbitmap_copy (dst, src[e->dest->index]);
break;
}
@@ -461,7 +461,7 @@ sbitmap_intersection_of_succs (dst, src, bb)
if (e->dest == EXIT_BLOCK_PTR)
continue;
- p = src[e->dest->sindex]->elms;
+ p = src[e->dest->index]->elms;
r = dst->elms;
for (i = 0; i < set_size; i++)
*r++ &= *p++;
@@ -486,7 +486,7 @@ sbitmap_intersection_of_preds (dst, src, bb)
if (e->src == ENTRY_BLOCK_PTR)
continue;
- sbitmap_copy (dst, src[e->src->sindex]);
+ sbitmap_copy (dst, src[e->src->index]);
break;
}
@@ -501,7 +501,7 @@ sbitmap_intersection_of_preds (dst, src, bb)
if (e->src == ENTRY_BLOCK_PTR)
continue;
- p = src[e->src->sindex]->elms;
+ p = src[e->src->index]->elms;
r = dst->elms;
for (i = 0; i < set_size; i++)
*r++ &= *p++;
@@ -526,7 +526,7 @@ sbitmap_union_of_succs (dst, src, bb)
if (e->dest == EXIT_BLOCK_PTR)
continue;
- sbitmap_copy (dst, src[e->dest->sindex]);
+ sbitmap_copy (dst, src[e->dest->index]);
break;
}
@@ -541,7 +541,7 @@ sbitmap_union_of_succs (dst, src, bb)
if (e->dest == EXIT_BLOCK_PTR)
continue;
- p = src[e->dest->sindex]->elms;
+ p = src[e->dest->index]->elms;
r = dst->elms;
for (i = 0; i < set_size; i++)
*r++ |= *p++;
@@ -566,7 +566,7 @@ sbitmap_union_of_preds (dst, src, bb)
if (e->src== ENTRY_BLOCK_PTR)
continue;
- sbitmap_copy (dst, src[e->src->sindex]);
+ sbitmap_copy (dst, src[e->src->index]);
break;
}
@@ -580,8 +580,8 @@ sbitmap_union_of_preds (dst, src, bb)
if (e->src == ENTRY_BLOCK_PTR)
continue;
-
- p = src[e->src->sindex]->elms;
+
+ p = src[e->src->index]->elms;
r = dst->elms;
for (i = 0; i < set_size; i++)
*r++ |= *p++;
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 0eea171..88bf2b7 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -1494,7 +1494,7 @@ init_dependency_caches (luid)
average number of instructions in a basic block is very high. See
the comment before the declaration of true_dependency_cache for
what we consider "very high". */
- if (luid / num_basic_blocks > 100 * 5)
+ if (luid / n_basic_blocks > 100 * 5)
{
true_dependency_cache = sbitmap_vector_alloc (luid, luid);
sbitmap_vector_zero (true_dependency_cache, luid);
diff --git a/gcc/sched-ebb.c b/gcc/sched-ebb.c
index 67f8884..5f1464b 100644
--- a/gcc/sched-ebb.c
+++ b/gcc/sched-ebb.c
@@ -279,11 +279,11 @@ void
schedule_ebbs (dump_file)
FILE *dump_file;
{
- basic_block bb;
+ int i;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (num_basic_blocks == 0)
+ if (n_basic_blocks == 0)
return;
scope_to_insns_initialize ();
@@ -296,19 +296,20 @@ schedule_ebbs (dump_file)
compute_bb_for_insn (get_max_uid ());
/* Schedule every region in the subroutine. */
- FOR_ALL_BB (bb)
- {
- rtx head = bb->head;
+ for (i = 0; i < n_basic_blocks; i++)
+ {
+ rtx head = BASIC_BLOCK (i)->head;
rtx tail;
for (;;)
{
+ basic_block b = BASIC_BLOCK (i);
edge e;
- tail = bb->end;
- if (bb->next_bb == EXIT_BLOCK_PTR
- || GET_CODE (bb->next_bb->head) == CODE_LABEL)
+ tail = b->end;
+ if (i + 1 == n_basic_blocks
+ || GET_CODE (BLOCK_HEAD (i + 1)) == CODE_LABEL)
break;
- for (e = bb->succ; e; e = e->succ_next)
+ for (e = b->succ; e; e = e->succ_next)
if ((e->flags & EDGE_FALLTHRU) != 0)
break;
if (! e)
@@ -324,7 +325,7 @@ schedule_ebbs (dump_file)
}
}
- bb = bb->next_bb;
+ i++;
}
/* Blah. We should fix the rest of the code not to get confused by
diff --git a/gcc/sched-rgn.c b/gcc/sched-rgn.c
index 892455e..acc8477 100644
--- a/gcc/sched-rgn.c
+++ b/gcc/sched-rgn.c
@@ -319,7 +319,7 @@ static void free_pending_lists PARAMS ((void));
static int
is_cfg_nonregular ()
{
- basic_block b;
+ int b;
rtx insn;
RTX_CODE code;
@@ -346,8 +346,8 @@ is_cfg_nonregular ()
/* If we have non-jumping insns which refer to labels, then we consider
the cfg not well structured. */
/* Check for labels referred to other thn by jumps. */
- FOR_ALL_BB (b)
- for (insn = b->head;; insn = NEXT_INSN (insn))
+ for (b = 0; b < n_basic_blocks; b++)
+ for (insn = BLOCK_HEAD (b);; insn = NEXT_INSN (insn))
{
code = GET_CODE (insn);
if (GET_RTX_CLASS (code) == 'i' && code != JUMP_INSN)
@@ -361,7 +361,7 @@ is_cfg_nonregular ()
return 1;
}
- if (insn == b->end)
+ if (insn == BLOCK_END (b))
break;
}
@@ -382,7 +382,6 @@ build_control_flow (edge_list)
struct edge_list *edge_list;
{
int i, unreachable, num_edges;
- basic_block b;
/* This already accounts for entry/exit edges. */
num_edges = NUM_EDGES (edge_list);
@@ -394,8 +393,10 @@ build_control_flow (edge_list)
test is redundant with the one in find_rgns, but it's much
cheaper to go ahead and catch the trivial case here. */
unreachable = 0;
- FOR_ALL_BB (b)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block b = BASIC_BLOCK (i);
+
if (b->pred == NULL
|| (b->pred->src == b
&& b->pred->pred_next == NULL))
@@ -403,8 +404,8 @@ build_control_flow (edge_list)
}
/* ??? We can kill these soon. */
- in_edges = (int *) xcalloc (last_basic_block, sizeof (int));
- out_edges = (int *) xcalloc (last_basic_block, sizeof (int));
+ in_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
+ out_edges = (int *) xcalloc (n_basic_blocks, sizeof (int));
edge_table = (haifa_edge *) xcalloc (num_edges, sizeof (haifa_edge));
nr_edges = 0;
@@ -414,7 +415,7 @@ build_control_flow (edge_list)
if (e->dest != EXIT_BLOCK_PTR
&& e->src != ENTRY_BLOCK_PTR)
- new_edge (e->src->sindex, e->dest->sindex);
+ new_edge (e->src->index, e->dest->index);
}
/* Increment by 1, since edge 0 is unused. */
@@ -543,19 +544,17 @@ debug_regions ()
static void
find_single_block_region ()
{
- basic_block bb;
-
- nr_regions = 0;
+ int i;
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
- rgn_bb_table[nr_regions] = bb->sindex;
- RGN_NR_BLOCKS (nr_regions) = 1;
- RGN_BLOCKS (nr_regions) = nr_regions;
- CONTAINING_RGN (bb->sindex) = nr_regions;
- BLOCK_TO_BB (bb->sindex) = 0;
- nr_regions++;
+ rgn_bb_table[i] = i;
+ RGN_NR_BLOCKS (i) = 1;
+ RGN_BLOCKS (i) = i;
+ CONTAINING_RGN (i) = i;
+ BLOCK_TO_BB (i) = 0;
}
+ nr_regions = n_basic_blocks;
}
/* Update number of blocks and the estimate for number of insns
@@ -632,7 +631,6 @@ find_rgns (edge_list, dom)
int count = 0, sp, idx = 0, current_edge = out_edges[0];
int num_bbs, num_insns, unreachable;
int too_large_failure;
- basic_block bb;
/* Note if an edge has been passed. */
sbitmap passed;
@@ -661,26 +659,26 @@ find_rgns (edge_list, dom)
STACK, SP and DFS_NR are only used during the first traversal. */
/* Allocate and initialize variables for the first traversal. */
- max_hdr = (int *) xmalloc (last_basic_block * sizeof (int));
- dfs_nr = (int *) xcalloc (last_basic_block, sizeof (int));
+ max_hdr = (int *) xmalloc (n_basic_blocks * sizeof (int));
+ dfs_nr = (int *) xcalloc (n_basic_blocks, sizeof (int));
stack = (int *) xmalloc (nr_edges * sizeof (int));
- inner = sbitmap_alloc (last_basic_block);
+ inner = sbitmap_alloc (n_basic_blocks);
sbitmap_ones (inner);
- header = sbitmap_alloc (last_basic_block);
+ header = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (header);
passed = sbitmap_alloc (nr_edges);
sbitmap_zero (passed);
- in_queue = sbitmap_alloc (last_basic_block);
+ in_queue = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (in_queue);
- in_stack = sbitmap_alloc (last_basic_block);
+ in_stack = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (in_stack);
- for (i = 0; i < last_basic_block; i++)
+ for (i = 0; i < n_basic_blocks; i++)
max_hdr[i] = -1;
/* DFS traversal to find inner loops in the cfg. */
@@ -774,8 +772,8 @@ find_rgns (edge_list, dom)
the entry node by placing a nonzero value in dfs_nr. Thus if
dfs_nr is zero for any block, then it must be unreachable. */
unreachable = 0;
- FOR_ALL_BB (bb)
- if (dfs_nr[bb->sindex] == 0)
+ for (i = 0; i < n_basic_blocks; i++)
+ if (dfs_nr[i] == 0)
{
unreachable = 1;
break;
@@ -785,14 +783,14 @@ find_rgns (edge_list, dom)
to hold degree counts. */
degree = dfs_nr;
- FOR_ALL_BB (bb)
- degree[bb->sindex] = 0;
+ for (i = 0; i < n_basic_blocks; i++)
+ degree[i] = 0;
for (i = 0; i < num_edges; i++)
{
edge e = INDEX_EDGE (edge_list, i);
if (e->dest != EXIT_BLOCK_PTR)
- degree[e->dest->sindex]++;
+ degree[e->dest->index]++;
}
/* Do not perform region scheduling if there are any unreachable
@@ -807,16 +805,16 @@ find_rgns (edge_list, dom)
/* Second travsersal:find reducible inner loops and topologically sort
block of each region. */
- queue = (int *) xmalloc (num_basic_blocks * sizeof (int));
+ queue = (int *) xmalloc (n_basic_blocks * sizeof (int));
/* Find blocks which are inner loop headers. We still have non-reducible
loops to consider at this point. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
- if (TEST_BIT (header, bb->sindex) && TEST_BIT (inner, bb->sindex))
+ if (TEST_BIT (header, i) && TEST_BIT (inner, i))
{
edge e;
- basic_block jbb;
+ int j;
/* Now check that the loop is reducible. We do this separate
from finding inner loops so that we do not find a reducible
@@ -829,15 +827,15 @@ find_rgns (edge_list, dom)
If there exists a block that is not dominated by the loop
header, then the block is reachable from outside the loop
and thus the loop is not a natural loop. */
- FOR_ALL_BB (jbb)
+ for (j = 0; j < n_basic_blocks; j++)
{
/* First identify blocks in the loop, except for the loop
entry block. */
- if (bb->sindex == max_hdr[jbb->sindex] && bb != jbb)
+ if (i == max_hdr[j] && i != j)
{
/* Now verify that the block is dominated by the loop
header. */
- if (!TEST_BIT (dom[jbb->sindex], bb->sindex))
+ if (!TEST_BIT (dom[j], i))
break;
}
}
@@ -845,25 +843,25 @@ find_rgns (edge_list, dom)
/* If we exited the loop early, then I is the header of
a non-reducible loop and we should quit processing it
now. */
- if (jbb != EXIT_BLOCK_PTR)
+ if (j != n_basic_blocks)
continue;
/* I is a header of an inner loop, or block 0 in a subroutine
with no loops at all. */
head = tail = -1;
too_large_failure = 0;
- loop_head = max_hdr[bb->sindex];
+ loop_head = max_hdr[i];
/* Decrease degree of all I's successors for topological
ordering. */
- for (e = bb->succ; e; e = e->succ_next)
+ for (e = BASIC_BLOCK (i)->succ; e; e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
- --degree[e->dest->sindex];
+ --degree[e->dest->index];
/* Estimate # insns, and count # blocks in the region. */
num_bbs = 1;
- num_insns = (INSN_LUID (bb->end)
- - INSN_LUID (bb->head));
+ num_insns = (INSN_LUID (BLOCK_END (i))
+ - INSN_LUID (BLOCK_HEAD (i)));
/* Find all loop latches (blocks with back edges to the loop
header) or all the leaf blocks in the cfg has no loops.
@@ -871,17 +869,17 @@ find_rgns (edge_list, dom)
Place those blocks into the queue. */
if (no_loops)
{
- FOR_ALL_BB (jbb)
+ for (j = 0; j < n_basic_blocks; j++)
/* Leaf nodes have only a single successor which must
be EXIT_BLOCK. */
- if (jbb->succ
- && jbb->succ->dest == EXIT_BLOCK_PTR
- && jbb->succ->succ_next == NULL)
+ if (BASIC_BLOCK (j)->succ
+ && BASIC_BLOCK (j)->succ->dest == EXIT_BLOCK_PTR
+ && BASIC_BLOCK (j)->succ->succ_next == NULL)
{
- queue[++tail] = jbb->sindex;
- SET_BIT (in_queue, jbb->sindex);
+ queue[++tail] = j;
+ SET_BIT (in_queue, j);
- if (too_large (jbb->sindex, &num_bbs, &num_insns))
+ if (too_large (j, &num_bbs, &num_insns))
{
too_large_failure = 1;
break;
@@ -892,14 +890,14 @@ find_rgns (edge_list, dom)
{
edge e;
- for (e = bb->pred; e; e = e->pred_next)
+ for (e = BASIC_BLOCK (i)->pred; e; e = e->pred_next)
{
if (e->src == ENTRY_BLOCK_PTR)
continue;
- node = e->src->sindex;
+ node = e->src->index;
- if (max_hdr[node] == loop_head && node != bb->sindex)
+ if (max_hdr[node] == loop_head && node != i)
{
/* This is a loop latch. */
queue[++tail] = node;
@@ -951,7 +949,7 @@ find_rgns (edge_list, dom)
for (e = BASIC_BLOCK (child)->pred; e; e = e->pred_next)
{
- node = e->src->sindex;
+ node = e->src->index;
/* See discussion above about nodes not marked as in
this loop during the initial DFS traversal. */
@@ -961,7 +959,7 @@ find_rgns (edge_list, dom)
tail = -1;
break;
}
- else if (!TEST_BIT (in_queue, node) && node != bb->sindex)
+ else if (!TEST_BIT (in_queue, node) && node != i)
{
queue[++tail] = node;
SET_BIT (in_queue, node);
@@ -978,12 +976,12 @@ find_rgns (edge_list, dom)
if (tail >= 0 && !too_large_failure)
{
/* Place the loop header into list of region blocks. */
- degree[bb->sindex] = -1;
- rgn_bb_table[idx] = bb->sindex;
+ degree[i] = -1;
+ rgn_bb_table[idx] = i;
RGN_NR_BLOCKS (nr_regions) = num_bbs;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (bb->sindex) = nr_regions;
- BLOCK_TO_BB (bb->sindex) = count = 0;
+ CONTAINING_RGN (i) = nr_regions;
+ BLOCK_TO_BB (i) = count = 0;
/* Remove blocks from queue[] when their in degree
becomes zero. Repeat until no blocks are left on the
@@ -1008,7 +1006,7 @@ find_rgns (edge_list, dom)
e;
e = e->succ_next)
if (e->dest != EXIT_BLOCK_PTR)
- --degree[e->dest->sindex];
+ --degree[e->dest->index];
}
else
--head;
@@ -1022,14 +1020,14 @@ find_rgns (edge_list, dom)
/* Any block that did not end up in a region is placed into a region
by itself. */
- FOR_ALL_BB (bb)
- if (degree[bb->sindex] >= 0)
+ for (i = 0; i < n_basic_blocks; i++)
+ if (degree[i] >= 0)
{
- rgn_bb_table[idx] = bb->sindex;
+ rgn_bb_table[idx] = i;
RGN_NR_BLOCKS (nr_regions) = 1;
RGN_BLOCKS (nr_regions) = idx++;
- CONTAINING_RGN (bb->sindex) = nr_regions++;
- BLOCK_TO_BB (bb->sindex) = 0;
+ CONTAINING_RGN (i) = nr_regions++;
+ BLOCK_TO_BB (i) = 0;
}
free (max_hdr);
@@ -1197,8 +1195,8 @@ compute_trg_info (trg)
add the TO block to the update block list. This list can end
up with a lot of duplicates. We need to weed them out to avoid
overrunning the end of the bblst_table. */
- update_blocks = (char *) alloca (last_basic_block);
- memset (update_blocks, 0, last_basic_block);
+ update_blocks = (char *) alloca (n_basic_blocks);
+ memset (update_blocks, 0, n_basic_blocks);
update_idx = 0;
for (j = 0; j < el.nr_members; j++)
@@ -2888,14 +2886,14 @@ init_regions ()
int rgn;
nr_regions = 0;
- rgn_table = (region *) xmalloc ((num_basic_blocks) * sizeof (region));
- rgn_bb_table = (int *) xmalloc ((num_basic_blocks) * sizeof (int));
- block_to_bb = (int *) xmalloc ((last_basic_block) * sizeof (int));
- containing_rgn = (int *) xmalloc ((last_basic_block) * sizeof (int));
+ rgn_table = (region *) xmalloc ((n_basic_blocks) * sizeof (region));
+ rgn_bb_table = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
+ block_to_bb = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
+ containing_rgn = (int *) xmalloc ((n_basic_blocks) * sizeof (int));
/* Compute regions for scheduling. */
if (reload_completed
- || num_basic_blocks == 1
+ || n_basic_blocks == 1
|| !flag_schedule_interblock)
{
find_single_block_region ();
@@ -2912,7 +2910,7 @@ init_regions ()
sbitmap *dom;
struct edge_list *edge_list;
- dom = sbitmap_vector_alloc (last_basic_block, last_basic_block);
+ dom = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
/* The scheduler runs after flow; therefore, we can't blindly call
back into find_basic_blocks since doing so could invalidate the
@@ -2953,7 +2951,7 @@ init_regions ()
if (CHECK_DEAD_NOTES)
{
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (n_basic_blocks);
deaths_in_region = (int *) xmalloc (sizeof (int) * nr_regions);
/* Remove all death notes from the subroutine. */
for (rgn = 0; rgn < nr_regions; rgn++)
@@ -2982,11 +2980,10 @@ schedule_insns (dump_file)
sbitmap large_region_blocks, blocks;
int rgn;
int any_large_regions;
- basic_block bb;
/* Taking care of this degenerate case makes the rest of
this code simpler. */
- if (num_basic_blocks == 0)
+ if (n_basic_blocks == 0)
return;
scope_to_insns_initialize ();
@@ -3021,12 +3018,10 @@ schedule_insns (dump_file)
compute_bb_for_insn (get_max_uid ());
any_large_regions = 0;
- large_region_blocks = sbitmap_alloc (last_basic_block);
- sbitmap_zero (large_region_blocks);
- FOR_ALL_BB (bb)
- SET_BIT (large_region_blocks, bb->sindex);
+ large_region_blocks = sbitmap_alloc (n_basic_blocks);
+ sbitmap_ones (large_region_blocks);
- blocks = sbitmap_alloc (last_basic_block);
+ blocks = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (blocks);
/* Update life information. For regions consisting of multiple blocks
diff --git a/gcc/sibcall.c b/gcc/sibcall.c
index 22a5f55..c62941f 100644
--- a/gcc/sibcall.c
+++ b/gcc/sibcall.c
@@ -583,7 +583,7 @@ optimize_sibling_and_tail_recursive_calls ()
cleanup_cfg (CLEANUP_PRE_SIBCALL | CLEANUP_PRE_LOOP);
/* If there are no basic blocks, then there is nothing to do. */
- if (num_basic_blocks == 0)
+ if (n_basic_blocks == 0)
return;
/* If we are using sjlj exceptions, we may need to add a call to
@@ -610,7 +610,7 @@ optimize_sibling_and_tail_recursive_calls ()
/* Walk forwards through the last normal block and see if it
does nothing except fall into the exit block. */
- for (insn = EXIT_BLOCK_PTR->prev_bb->head;
+ for (insn = BLOCK_HEAD (n_basic_blocks - 1);
insn;
insn = NEXT_INSN (insn))
{
diff --git a/gcc/ssa-ccp.c b/gcc/ssa-ccp.c
index be1b0dd..6417276 100644
--- a/gcc/ssa-ccp.c
+++ b/gcc/ssa-ccp.c
@@ -648,13 +648,13 @@ examine_flow_edges ()
/* If this is the first time we've simulated this block, then we
must simulate each of its insns. */
- if (!TEST_BIT (executable_blocks, succ_block->sindex))
+ if (!TEST_BIT (executable_blocks, succ_block->index))
{
rtx currinsn;
edge succ_edge = succ_block->succ;
/* Note that we have simulated this block. */
- SET_BIT (executable_blocks, succ_block->sindex);
+ SET_BIT (executable_blocks, succ_block->index);
/* Simulate each insn within the block. */
currinsn = succ_block->head;
@@ -740,7 +740,6 @@ optimize_unexecutable_edges (edges, executable_edges)
sbitmap executable_edges;
{
int i;
- basic_block bb;
for (i = 0; i < NUM_EDGES (edges); i++)
{
@@ -762,15 +761,15 @@ optimize_unexecutable_edges (edges, executable_edges)
remove_phi_alternative (PATTERN (insn), edge->src);
if (rtl_dump_file)
fprintf (rtl_dump_file,
- "Removing alternative for bb %d of phi %d\n",
- edge->src->sindex, SSA_NAME (PATTERN (insn)));
+ "Removing alternative for bb %d of phi %d\n",
+ edge->src->index, SSA_NAME (PATTERN (insn)));
insn = NEXT_INSN (insn);
}
}
if (rtl_dump_file)
fprintf (rtl_dump_file,
"Removing unexecutable edge from %d to %d\n",
- edge->src->sindex, edge->dest->sindex);
+ edge->src->index, edge->dest->index);
/* Since the edge was not executable, remove it from the CFG. */
remove_edge (edge);
}
@@ -798,8 +797,9 @@ optimize_unexecutable_edges (edges, executable_edges)
In cases B & C we are removing uses of registers, so make sure
to note those changes for the DF analyzer. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
rtx insn = bb->end;
edge edge = bb->succ;
@@ -929,7 +929,7 @@ ssa_ccp_substitute_constants ()
static void
ssa_ccp_df_delete_unreachable_insns ()
{
- basic_block b;
+ int i;
/* Use the CFG to find all the reachable blocks. */
find_unreachable_blocks ();
@@ -937,8 +937,10 @@ ssa_ccp_df_delete_unreachable_insns ()
/* Now we know what blocks are not reachable. Mark all the insns
in those blocks as deleted for the DF analyzer. We'll let the
normal flow code actually remove the unreachable blocks. */
- FOR_ALL_BB_REVERSE (b)
+ for (i = n_basic_blocks - 1; i >= 0; --i)
{
+ basic_block b = BASIC_BLOCK (i);
+
if (!(b->flags & BB_REACHABLE))
{
rtx start = b->head;
@@ -1016,7 +1018,7 @@ ssa_const_prop ()
ssa_edges = sbitmap_alloc (VARRAY_SIZE (ssa_definition));
sbitmap_zero (ssa_edges);
- executable_blocks = sbitmap_alloc (last_basic_block);
+ executable_blocks = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (executable_blocks);
executable_edges = sbitmap_alloc (NUM_EDGES (edges));
diff --git a/gcc/ssa-dce.c b/gcc/ssa-dce.c
index 8bd5e09..45dcd65 100644
--- a/gcc/ssa-dce.c
+++ b/gcc/ssa-dce.c
@@ -153,7 +153,7 @@ static void delete_insn_bb
/* Create a control_dependent_block_to_edge_map, given the number
NUM_BASIC_BLOCKS of non-entry, non-exit basic blocks, e.g.,
- num_basic_blocks. This memory must be released using
+ n_basic_blocks. This memory must be released using
control_dependent_block_to_edge_map_free (). */
static control_dependent_block_to_edge_map
@@ -181,10 +181,10 @@ set_control_dependent_block_to_edge_map_bit (c, bb, edge_index)
basic_block bb;
int edge_index;
{
- if (bb->sindex - (INVALID_BLOCK+1) >= c->length)
+ if (bb->index - (INVALID_BLOCK+1) >= c->length)
abort ();
- bitmap_set_bit (c->data[bb->sindex - (INVALID_BLOCK+1)],
+ bitmap_set_bit (c->data[bb->index - (INVALID_BLOCK+1)],
edge_index);
}
@@ -247,7 +247,7 @@ find_control_dependence (el, edge_index, pdom, cdbte)
abort ();
ending_block =
(INDEX_EDGE_PRED_BB (el, edge_index) == ENTRY_BLOCK_PTR)
- ? ENTRY_BLOCK_PTR->next_bb
+ ? BASIC_BLOCK (0)
: find_pdom (pdom, INDEX_EDGE_PRED_BB (el, edge_index));
for (current_block = INDEX_EDGE_SUCC_BB (el, edge_index);
@@ -271,15 +271,15 @@ find_pdom (pdom, block)
{
if (!block)
abort ();
- if (block->sindex == INVALID_BLOCK)
+ if (block->index == INVALID_BLOCK)
abort ();
if (block == ENTRY_BLOCK_PTR)
- return ENTRY_BLOCK_PTR->next_bb;
- else if (block == EXIT_BLOCK_PTR || pdom[block->sindex] == EXIT_BLOCK)
+ return BASIC_BLOCK (0);
+ else if (block == EXIT_BLOCK_PTR || pdom[block->index] == EXIT_BLOCK)
return EXIT_BLOCK_PTR;
else
- return BASIC_BLOCK (pdom[block->sindex]);
+ return BASIC_BLOCK (pdom[block->index]);
}
/* Determine if the given CURRENT_RTX uses a hard register not
@@ -490,7 +490,6 @@ ssa_eliminate_dead_code ()
{
int i;
rtx insn;
- basic_block bb;
/* Necessary instructions with operands to explore. */
varray_type unprocessed_instructions;
/* Map element (b,e) is nonzero if the block is control dependent on
@@ -506,7 +505,7 @@ ssa_eliminate_dead_code ()
mark_all_insn_unnecessary ();
VARRAY_RTX_INIT (unprocessed_instructions, 64,
"unprocessed instructions");
- cdbte = control_dependent_block_to_edge_map_create (last_basic_block);
+ cdbte = control_dependent_block_to_edge_map_create (n_basic_blocks);
/* Prepare for use of BLOCK_NUM (). */
connect_infinite_loops_to_exit ();
@@ -514,12 +513,12 @@ ssa_eliminate_dead_code ()
compute_bb_for_insn (max_insn_uid);
/* Compute control dependence. */
- pdom = (int *) xmalloc (last_basic_block * sizeof (int));
- for (i = 0; i < last_basic_block; ++i)
+ pdom = (int *) xmalloc (n_basic_blocks * sizeof (int));
+ for (i = 0; i < n_basic_blocks; ++i)
pdom[i] = INVALID_BLOCK;
calculate_dominance_info (pdom, NULL, CDI_POST_DOMINATORS);
/* Assume there is a path from each node to the exit block. */
- for (i = 0; i < last_basic_block; ++i)
+ for (i = 0; i < n_basic_blocks; ++i)
if (pdom[i] == INVALID_BLOCK)
pdom[i] = EXIT_BLOCK;
el = create_edge_list ();
@@ -719,8 +718,10 @@ ssa_eliminate_dead_code ()
/* Find any blocks with no successors and ensure they are followed
by a BARRIER. delete_insn has the nasty habit of deleting barriers
when deleting insns. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
+ basic_block bb = BASIC_BLOCK (i);
+
if (bb->succ == NULL)
{
rtx next = NEXT_INSN (bb->end);
diff --git a/gcc/ssa.c b/gcc/ssa.c
index a1dedfb..686339c 100644
--- a/gcc/ssa.c
+++ b/gcc/ssa.c
@@ -430,7 +430,7 @@ remove_phi_alternative (set, block)
int num_elem = GET_NUM_ELEM (phi_vec);
int v, c;
- c = block->sindex;
+ c = block->index;
for (v = num_elem - 2; v >= 0; v -= 2)
if (INTVAL (RTVEC_ELT (phi_vec, v + 1)) == c)
{
@@ -470,18 +470,18 @@ find_evaluations (evals, nregs)
sbitmap *evals;
int nregs;
{
- basic_block bb;
+ int bb;
sbitmap_vector_zero (evals, nregs);
fe_evals = evals;
- FOR_ALL_BB_REVERSE (bb)
+ for (bb = n_basic_blocks; --bb >= 0; )
{
rtx p, last;
- fe_current_bb = bb->sindex;
- p = bb->head;
- last = bb->end;
+ fe_current_bb = bb;
+ p = BLOCK_HEAD (bb);
+ last = BLOCK_END (bb);
while (1)
{
if (INSN_P (p))
@@ -520,7 +520,7 @@ compute_dominance_frontiers_1 (frontiers, idom, bb, done)
{
basic_block b = BASIC_BLOCK (bb);
edge e;
- basic_block c;
+ int c;
SET_BIT (done, bb);
sbitmap_zero (frontiers[bb]);
@@ -528,25 +528,25 @@ compute_dominance_frontiers_1 (frontiers, idom, bb, done)
/* Do the frontier of the children first. Not all children in the
dominator tree (blocks dominated by this one) are children in the
CFG, so check all blocks. */
- FOR_ALL_BB (c)
- if (idom[c->sindex] == bb && ! TEST_BIT (done, c->sindex))
- compute_dominance_frontiers_1 (frontiers, idom, c->sindex, done);
+ for (c = 0; c < n_basic_blocks; ++c)
+ if (idom[c] == bb && ! TEST_BIT (done, c))
+ compute_dominance_frontiers_1 (frontiers, idom, c, done);
/* Find blocks conforming to rule (1) above. */
for (e = b->succ; e; e = e->succ_next)
{
if (e->dest == EXIT_BLOCK_PTR)
continue;
- if (idom[e->dest->sindex] != bb)
- SET_BIT (frontiers[bb], e->dest->sindex);
+ if (idom[e->dest->index] != bb)
+ SET_BIT (frontiers[bb], e->dest->index);
}
/* Find blocks conforming to rule (2). */
- FOR_ALL_BB (c)
- if (idom[c->sindex] == bb)
+ for (c = 0; c < n_basic_blocks; ++c)
+ if (idom[c] == bb)
{
int x;
- EXECUTE_IF_SET_IN_SBITMAP (frontiers[c->sindex], 0, x,
+ EXECUTE_IF_SET_IN_SBITMAP (frontiers[c], 0, x,
{
if (idom[x] != bb)
SET_BIT (frontiers[bb], x);
@@ -559,7 +559,7 @@ compute_dominance_frontiers (frontiers, idom)
sbitmap *frontiers;
int *idom;
{
- sbitmap done = sbitmap_alloc (last_basic_block);
+ sbitmap done = sbitmap_alloc (n_basic_blocks);
sbitmap_zero (done);
compute_dominance_frontiers_1 (frontiers, idom, 0, done);
@@ -585,7 +585,7 @@ compute_iterated_dominance_frontiers (idfs, frontiers, evals, nregs)
sbitmap worklist;
int reg, passes = 0;
- worklist = sbitmap_alloc (last_basic_block);
+ worklist = sbitmap_alloc (n_basic_blocks);
for (reg = 0; reg < nregs; ++reg)
{
@@ -665,7 +665,7 @@ insert_phi_node (regno, bb)
if (e->src != ENTRY_BLOCK_PTR)
{
RTVEC_ELT (vec, i + 0) = pc_rtx;
- RTVEC_ELT (vec, i + 1) = GEN_INT (e->src->sindex);
+ RTVEC_ELT (vec, i + 1) = GEN_INT (e->src->index);
}
phi = gen_rtx_PHI (VOIDmode, vec);
@@ -975,7 +975,7 @@ rename_block (bb, idom)
edge e;
rtx insn, next, last;
struct rename_set_data *set_data = NULL;
- basic_block c;
+ int c;
/* Step One: Walk the basic block, adding new names for sets and
replacing uses. */
@@ -1078,9 +1078,9 @@ rename_block (bb, idom)
/* Step Three: Do the same to the children of this block in
dominator order. */
- FOR_ALL_BB (c)
- if (idom[c->sindex] == bb)
- rename_block (c->sindex, idom);
+ for (c = 0; c < n_basic_blocks; ++c)
+ if (idom[c] == bb)
+ rename_block (c, idom);
/* Step Four: Update the sets to refer to their new register,
and restore ssa_rename_to to its previous state. */
@@ -1140,8 +1140,6 @@ convert_to_ssa ()
int nregs;
- basic_block bb;
-
/* Don't do it twice. */
if (in_ssa_form)
abort ();
@@ -1150,27 +1148,28 @@ convert_to_ssa ()
dead code. We'll let the SSA optimizers do that. */
life_analysis (get_insns (), NULL, 0);
- idom = (int *) alloca (last_basic_block * sizeof (int));
- memset ((void *) idom, -1, (size_t) last_basic_block * sizeof (int));
+ idom = (int *) alloca (n_basic_blocks * sizeof (int));
+ memset ((void *) idom, -1, (size_t) n_basic_blocks * sizeof (int));
calculate_dominance_info (idom, NULL, CDI_DOMINATORS);
if (rtl_dump_file)
{
+ int i;
fputs (";; Immediate Dominators:\n", rtl_dump_file);
- FOR_ALL_BB (bb)
- fprintf (rtl_dump_file, ";\t%3d = %3d\n", bb->sindex, idom[bb->sindex]);
+ for (i = 0; i < n_basic_blocks; ++i)
+ fprintf (rtl_dump_file, ";\t%3d = %3d\n", i, idom[i]);
fflush (rtl_dump_file);
}
/* Compute dominance frontiers. */
- dfs = sbitmap_vector_alloc (last_basic_block, last_basic_block);
+ dfs = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
compute_dominance_frontiers (dfs, idom);
if (rtl_dump_file)
{
dump_sbitmap_vector (rtl_dump_file, ";; Dominance Frontiers:",
- "; Basic Block", dfs, last_basic_block);
+ "; Basic Block", dfs, n_basic_blocks);
fflush (rtl_dump_file);
}
@@ -1178,12 +1177,12 @@ convert_to_ssa ()
ssa_max_reg_num = max_reg_num ();
nregs = ssa_max_reg_num;
- evals = sbitmap_vector_alloc (nregs, last_basic_block);
+ evals = sbitmap_vector_alloc (nregs, n_basic_blocks);
find_evaluations (evals, nregs);
/* Compute the iterated dominance frontier for each register. */
- idfs = sbitmap_vector_alloc (nregs, last_basic_block);
+ idfs = sbitmap_vector_alloc (nregs, n_basic_blocks);
compute_iterated_dominance_frontiers (idfs, dfs, evals, nregs);
if (rtl_dump_file)
@@ -1384,7 +1383,7 @@ eliminate_phi (e, reg_partition)
n_nodes = 0;
for (; PHI_NODE_P (insn); insn = next_nonnote_insn (insn))
{
- rtx* preg = phi_alternative (PATTERN (insn), e->src->sindex);
+ rtx* preg = phi_alternative (PATTERN (insn), e->src->index);
rtx tgt = SET_DEST (PATTERN (insn));
rtx reg;
@@ -1446,7 +1445,7 @@ eliminate_phi (e, reg_partition)
insert_insn_on_edge (insn, e);
if (rtl_dump_file)
fprintf (rtl_dump_file, "Emitting copy on edge (%d,%d)\n",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
sbitmap_free (visited);
out:
@@ -1501,7 +1500,7 @@ make_regs_equivalent_over_bad_edges (bb, reg_partition)
for (e = b->pred; e; e = e->pred_next)
if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
{
- rtx *alt = phi_alternative (set, e->src->sindex);
+ rtx *alt = phi_alternative (set, e->src->index);
int alt_regno;
/* If there is no alternative corresponding to this edge,
@@ -1582,7 +1581,7 @@ make_equivalent_phi_alternatives_equivalent (bb, reg_partition)
/* Scan over edges. */
for (e = b->pred; e; e = e->pred_next)
{
- int pred_block = e->src->sindex;
+ int pred_block = e->src->index;
/* Identify the phi alternatives from both phi
nodes corresponding to this edge. */
rtx *alt = phi_alternative (set, pred_block);
@@ -1630,7 +1629,7 @@ make_equivalent_phi_alternatives_equivalent (bb, reg_partition)
static partition
compute_conservative_reg_partition ()
{
- basic_block bb;
+ int bb;
int changed = 0;
/* We don't actually work with hard registers, but it's easier to
@@ -1643,17 +1642,17 @@ compute_conservative_reg_partition ()
be copied on abnormal critical edges are placed in the same
partition. This saves us from having to split abnormal critical
edges. */
- FOR_ALL_BB_REVERSE (bb)
- changed += make_regs_equivalent_over_bad_edges (bb->sindex, p);
-
+ for (bb = n_basic_blocks; --bb >= 0; )
+ changed += make_regs_equivalent_over_bad_edges (bb, p);
+
/* Now we have to insure that corresponding arguments of phi nodes
assigning to corresponding regs are equivalent. Iterate until
nothing changes. */
while (changed > 0)
{
changed = 0;
- FOR_ALL_BB_REVERSE (bb)
- changed += make_equivalent_phi_alternatives_equivalent (bb->sindex, p);
+ for (bb = n_basic_blocks; --bb >= 0; )
+ changed += make_equivalent_phi_alternatives_equivalent (bb, p);
}
return p;
@@ -1849,7 +1848,7 @@ coalesce_regs_in_successor_phi_nodes (bb, p, conflicts)
static partition
compute_coalesced_reg_partition ()
{
- basic_block bb;
+ int bb;
int changed = 0;
regset_head phi_set_head;
regset phi_set = &phi_set_head;
@@ -1861,8 +1860,8 @@ compute_coalesced_reg_partition ()
be copied on abnormal critical edges are placed in the same
partition. This saves us from having to split abnormal critical
edges (which can't be done). */
- FOR_ALL_BB_REVERSE (bb)
- make_regs_equivalent_over_bad_edges (bb->sindex, p);
+ for (bb = n_basic_blocks; --bb >= 0; )
+ make_regs_equivalent_over_bad_edges (bb, p);
INIT_REG_SET (phi_set);
@@ -1884,11 +1883,12 @@ compute_coalesced_reg_partition ()
blocks first, so that most frequently executed copies would
be more likely to be removed by register coalescing. But any
order will generate correct, if non-optimal, results. */
- FOR_ALL_BB_REVERSE (bb)
+ for (bb = n_basic_blocks; --bb >= 0; )
{
- changed += coalesce_regs_in_copies (bb, p, conflicts);
- changed +=
- coalesce_regs_in_successor_phi_nodes (bb, p, conflicts);
+ basic_block block = BASIC_BLOCK (bb);
+ changed += coalesce_regs_in_copies (block, p, conflicts);
+ changed +=
+ coalesce_regs_in_successor_phi_nodes (block, p, conflicts);
}
conflict_graph_delete (conflicts);
@@ -2094,10 +2094,11 @@ static void
rename_equivalent_regs (reg_partition)
partition reg_partition;
{
- basic_block b;
+ int bb;
- FOR_ALL_BB_REVERSE (b)
+ for (bb = n_basic_blocks; --bb >= 0; )
{
+ basic_block b = BASIC_BLOCK (bb);
rtx next = b->head;
rtx last = b->end;
rtx insn;
@@ -2140,7 +2141,7 @@ rename_equivalent_regs (reg_partition)
void
convert_from_ssa ()
{
- basic_block b, bb;
+ int bb;
partition reg_partition;
rtx insns = get_insns ();
@@ -2166,8 +2167,9 @@ convert_from_ssa ()
rename_equivalent_regs (reg_partition);
/* Eliminate the PHI nodes. */
- FOR_ALL_BB_REVERSE (b)
+ for (bb = n_basic_blocks; --bb >= 0; )
{
+ basic_block b = BASIC_BLOCK (bb);
edge e;
for (e = b->pred; e; e = e->pred_next)
@@ -2178,17 +2180,17 @@ convert_from_ssa ()
partition_delete (reg_partition);
/* Actually delete the PHI nodes. */
- FOR_ALL_BB_REVERSE (bb)
+ for (bb = n_basic_blocks; --bb >= 0; )
{
- rtx insn = bb->head;
+ rtx insn = BLOCK_HEAD (bb);
while (1)
{
/* If this is a PHI node delete it. */
if (PHI_NODE_P (insn))
{
- if (insn == bb->end)
- bb->end = PREV_INSN (insn);
+ if (insn == BLOCK_END (bb))
+ BLOCK_END (bb) = PREV_INSN (insn);
insn = delete_insn (insn);
}
/* Since all the phi nodes come at the beginning of the
@@ -2197,7 +2199,7 @@ convert_from_ssa ()
else if (INSN_P (insn))
break;
/* If we've reached the end of the block, stop. */
- else if (insn == bb->end)
+ else if (insn == BLOCK_END (bb))
break;
else
insn = NEXT_INSN (insn);
@@ -2257,7 +2259,7 @@ for_each_successor_phi (bb, fn, data)
{
int result;
rtx phi_set = PATTERN (insn);
- rtx *alternative = phi_alternative (phi_set, bb->sindex);
+ rtx *alternative = phi_alternative (phi_set, bb->index);
rtx phi_src;
/* This phi function may not have an alternative