author    Richard Henderson <rth@redhat.com>    2002-05-16 19:31:56 -0700
committer Richard Henderson <rth@gcc.gnu.org>   2002-05-16 19:31:56 -0700
commit    0b17ab2f5b1184fdb568786f791bc0613e574241
tree      94c8895c6dde3b282518d4c9951067cd0ac517fd /gcc/gcse.c
parent    8ae86b3cd8c96e287714f127879b018ac7fccd7d
Revert "Basic block renumbering removal", and two followup patches.
From-SVN: r53537
Diffstat (limited to 'gcc/gcse.c')
-rw-r--r--  gcc/gcse.c | 478
1 file changed, 237 insertions, 241 deletions
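The revert is almost entirely mechanical: the reverted patches had renamed `bb->index` to `bb->sindex` and `n_basic_blocks` to `num_basic_blocks`, and replaced the index-based CFG walk (`for (bb = 0; bb < n_basic_blocks; bb++)` with `BASIC_BLOCK (bb)`, `BLOCK_HEAD`, `BLOCK_END`) by a pointer-based walk over `basic_block` structures (`FOR_ALL_BB`), sizing per-block arrays by `last_basic_block`. The following is a minimal self-contained sketch of the two idioms, using invented stand-in types rather than GCC's real headers:

```c
#include <stdio.h>

/* Stand-ins for GCC's CFG types of that era; names and layout are
   illustrative only, not the real basic-block.h definitions.  */
struct basic_block_def { int index; const char *name; };
typedef struct basic_block_def *basic_block;

#define N_BLOCKS 3
static struct basic_block_def block_table[N_BLOCKS] = {
  { 0, "bb0" }, { 1, "bb1" }, { 2, "bb2" }
};
static int n_basic_blocks = N_BLOCKS;

/* Index-based lookup: the style this revert restores.  */
#define BASIC_BLOCK(i) (&block_table[(i)])

/* Pointer-based walk: the style being reverted (simplified).  */
#define FOR_ALL_BB(bb) \
  for ((bb) = &block_table[0]; (bb) < &block_table[N_BLOCKS]; (bb)++)

int main (void)
{
  int i;
  basic_block bb;

  /* Restored idiom: per-block arrays are sized and indexed by
     n_basic_blocks, which assumes contiguous block numbering.  */
  for (i = 0; i < n_basic_blocks; i++)
    printf ("visit %s\n", BASIC_BLOCK (i)->name);

  /* Reverted idiom: walk the block structures directly; indices may
     be sparse, hence last_basic_block as the array bound elsewhere.  */
  FOR_ALL_BB (bb)
    printf ("visit %s (index %d)\n", bb->name, bb->index);

  return 0;
}
```

The hunks below undo that substitution throughout gcse.c.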
diff --git a/gcc/gcse.c b/gcc/gcse.c
index e4e1770..9320053 100644
--- a/gcc/gcse.c
+++ b/gcc/gcse.c
@@ -541,7 +541,7 @@ static sbitmap *ae_kill, *ae_gen, *ae_in, *ae_out;
struct null_pointer_info
{
/* The basic block being processed. */
- basic_block current_block;
+ int current_block;
/* The first register to be handled in this pass. */
unsigned int min_reg;
/* One greater than the last register to be handled in this pass. */
@@ -740,9 +740,9 @@ gcse_main (f, file)
if (file)
dump_flow_info (file);
- orig_bb_count = num_basic_blocks;
+ orig_bb_count = n_basic_blocks;
/* Return if there's nothing to do. */
- if (num_basic_blocks <= 1)
+ if (n_basic_blocks <= 1)
return 0;
/* Trying to perform global optimizations on flow graphs which have
@@ -753,23 +753,23 @@ gcse_main (f, file)
as blocks. But we do not want to punish small functions which have
a couple switch statements. So we require a relatively large number
of basic blocks and the ratio of edges to blocks to be high. */
- if (num_basic_blocks > 1000 && n_edges / num_basic_blocks >= 20)
+ if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
{
if (warn_disabled_optimization)
warning ("GCSE disabled: %d > 1000 basic blocks and %d >= 20 edges/basic block",
- num_basic_blocks, n_edges / num_basic_blocks);
+ n_basic_blocks, n_edges / n_basic_blocks);
return 0;
}
/* If allocating memory for the cprop bitmap would take up too much
storage it's better just to disable the optimization. */
- if ((num_basic_blocks
+ if ((n_basic_blocks
* SBITMAP_SET_SIZE (max_gcse_regno)
* sizeof (SBITMAP_ELT_TYPE)) > MAX_GCSE_MEMORY)
{
if (warn_disabled_optimization)
warning ("GCSE disabled: %d basic blocks and %d registers",
- num_basic_blocks, max_gcse_regno);
+ n_basic_blocks, max_gcse_regno);
return 0;
}
@@ -834,12 +834,12 @@ gcse_main (f, file)
{
free_modify_mem_tables ();
modify_mem_list
- = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
+ = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
canon_modify_mem_list
- = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
- memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
- memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
- orig_bb_count = num_basic_blocks;
+ = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
+ memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
+ memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
+ orig_bb_count = n_basic_blocks;
}
free_reg_set_mem ();
alloc_reg_set_mem (max_reg_num ());
@@ -894,7 +894,7 @@ gcse_main (f, file)
if (file)
{
fprintf (file, "GCSE of %s: %d basic blocks, ",
- current_function_name, num_basic_blocks);
+ current_function_name, n_basic_blocks);
fprintf (file, "%d pass%s, %d bytes\n\n",
pass, pass > 1 ? "es" : "", max_pass_bytes);
}
@@ -1019,14 +1019,14 @@ alloc_gcse_mem (f)
reg_set_bitmap = BITMAP_XMALLOC ();
/* Allocate vars to track sets of regs, memory per block. */
- reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
max_gcse_regno);
/* Allocate array to keep a list of insns which modify memory in each
basic block. */
- modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
- canon_modify_mem_list = (rtx *) gmalloc (last_basic_block * sizeof (rtx));
- memset ((char *) modify_mem_list, 0, last_basic_block * sizeof (rtx));
- memset ((char *) canon_modify_mem_list, 0, last_basic_block * sizeof (rtx));
+ modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
+ canon_modify_mem_list = (rtx *) gmalloc (n_basic_blocks * sizeof (rtx));
+ memset ((char *) modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
+ memset ((char *) canon_modify_mem_list, 0, n_basic_blocks * sizeof (rtx));
modify_mem_list_set = BITMAP_XMALLOC ();
canon_modify_mem_list_set = BITMAP_XMALLOC ();
}
@@ -1132,15 +1132,15 @@ compute_local_properties (transp, comp, antloc, setp)
if (transp)
{
if (setp)
- sbitmap_vector_zero (transp, last_basic_block);
+ sbitmap_vector_zero (transp, n_basic_blocks);
else
- sbitmap_vector_ones (transp, last_basic_block);
+ sbitmap_vector_ones (transp, n_basic_blocks);
}
if (comp)
- sbitmap_vector_zero (comp, last_basic_block);
+ sbitmap_vector_zero (comp, n_basic_blocks);
if (antloc)
- sbitmap_vector_zero (antloc, last_basic_block);
+ sbitmap_vector_zero (antloc, n_basic_blocks);
/* We use the same code for cprop, pre and hoisting. For cprop
we care about the set hash table, for pre and hoisting we
@@ -1292,13 +1292,13 @@ compute_sets (f)
struct reg_avail_info
{
- basic_block last_bb;
+ int last_bb;
int first_set;
int last_set;
};
static struct reg_avail_info *reg_avail_info;
-static basic_block current_bb;
+static int current_bb;
/* See whether X, the source of a set, is something we want to consider for
@@ -1385,7 +1385,7 @@ oprs_unchanged_p (x, insn, avail_p)
}
case MEM:
- if (load_killed_in_block_p (current_bb, INSN_CUID (insn),
+ if (load_killed_in_block_p (BASIC_BLOCK (current_bb), INSN_CUID (insn),
x, avail_p))
return 0;
else
@@ -1499,7 +1499,7 @@ load_killed_in_block_p (bb, uid_limit, x, avail_p)
rtx x;
int avail_p;
{
- rtx list_entry = modify_mem_list[bb->sindex];
+ rtx list_entry = modify_mem_list[bb->index];
while (list_entry)
{
rtx setter;
@@ -2373,7 +2373,7 @@ record_last_reg_set_info (insn, regno)
{
info->last_bb = current_bb;
info->first_set = cuid;
- SET_BIT (reg_set_in_block[current_bb->sindex], regno);
+ SET_BIT (reg_set_in_block[current_bb], regno);
}
}
@@ -2493,7 +2493,7 @@ compute_hash_table (set_p)
registers are set in which blocks.
??? This isn't needed during const/copy propagation, but it's cheap to
compute. Later. */
- sbitmap_vector_zero (reg_set_in_block, last_basic_block);
+ sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
/* re-Cache any INSN_LIST nodes we have allocated. */
clear_modify_mem_tables ();
@@ -2502,9 +2502,9 @@ compute_hash_table (set_p)
gmalloc (max_gcse_regno * sizeof (struct reg_avail_info));
for (i = 0; i < max_gcse_regno; ++i)
- reg_avail_info[i].last_bb = NULL;
+ reg_avail_info[i].last_bb = NEVER_SET;
- FOR_ALL_BB (current_bb)
+ for (current_bb = 0; current_bb < n_basic_blocks; current_bb++)
{
rtx insn;
unsigned int regno;
@@ -2515,8 +2515,8 @@ compute_hash_table (set_p)
??? hard-reg reg_set_in_block computation
could be moved to compute_sets since they currently don't change. */
- for (insn = current_bb->head;
- insn && insn != NEXT_INSN (current_bb->end);
+ for (insn = BLOCK_HEAD (current_bb);
+ insn && insn != NEXT_INSN (BLOCK_END (current_bb));
insn = NEXT_INSN (insn))
{
if (! INSN_P (insn))
@@ -2544,8 +2544,8 @@ compute_hash_table (set_p)
/* The next pass builds the hash table. */
- for (insn = current_bb->head, in_libcall_block = 0;
- insn && insn != NEXT_INSN (current_bb->end);
+ for (insn = BLOCK_HEAD (current_bb), in_libcall_block = 0;
+ insn && insn != NEXT_INSN (BLOCK_END (current_bb));
insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
@@ -2938,16 +2938,16 @@ alloc_rd_mem (n_blocks, n_insns)
int n_blocks, n_insns;
{
rd_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_kill, last_basic_block);
+ sbitmap_vector_zero (rd_kill, n_basic_blocks);
rd_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_gen, last_basic_block);
+ sbitmap_vector_zero (rd_gen, n_basic_blocks);
reaching_defs = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (reaching_defs, last_basic_block);
+ sbitmap_vector_zero (reaching_defs, n_basic_blocks);
rd_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_insns);
- sbitmap_vector_zero (rd_out, last_basic_block);
+ sbitmap_vector_zero (rd_out, n_basic_blocks);
}
/* Free reaching def variables. */
@@ -2973,7 +2973,7 @@ handle_rd_kill_set (insn, regno, bb)
for (this_reg = reg_set_table[regno]; this_reg; this_reg = this_reg ->next)
if (BLOCK_NUM (this_reg->insn) != BLOCK_NUM (insn))
- SET_BIT (rd_kill[bb->sindex], INSN_CUID (this_reg->insn));
+ SET_BIT (rd_kill[bb->index], INSN_CUID (this_reg->insn));
}
/* Compute the set of kill's for reaching definitions. */
@@ -2981,10 +2981,9 @@ handle_rd_kill_set (insn, regno, bb)
static void
compute_kill_rd ()
{
- int cuid;
+ int bb, cuid;
unsigned int regno;
int i;
- basic_block bb;
/* For each block
For each set bit in `gen' of the block (i.e each insn which
@@ -2994,9 +2993,9 @@ compute_kill_rd ()
For each setting of regx in the linked list, which is not in
this block
Set the bit in `kill' corresponding to that insn. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
for (cuid = 0; cuid < max_cuid; cuid++)
- if (TEST_BIT (rd_gen[bb->sindex], cuid))
+ if (TEST_BIT (rd_gen[bb], cuid))
{
rtx insn = CUID_INSN (cuid);
rtx pat = PATTERN (insn);
@@ -3005,7 +3004,7 @@ compute_kill_rd ()
{
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
- handle_rd_kill_set (insn, regno, bb);
+ handle_rd_kill_set (insn, regno, BASIC_BLOCK (bb));
}
if (GET_CODE (pat) == PARALLEL)
@@ -3018,13 +3017,13 @@ compute_kill_rd ()
&& GET_CODE (XEXP (XVECEXP (pat, 0, i), 0)) == REG)
handle_rd_kill_set (insn,
REGNO (XEXP (XVECEXP (pat, 0, i), 0)),
- bb);
+ BASIC_BLOCK (bb));
}
}
else if (GET_CODE (pat) == SET && GET_CODE (SET_DEST (pat)) == REG)
/* Each setting of this register outside of this block
must be marked in the set of kills in this block. */
- handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), bb);
+ handle_rd_kill_set (insn, REGNO (SET_DEST (pat)), BASIC_BLOCK (bb));
}
}
@@ -3036,22 +3035,21 @@ compute_kill_rd ()
static void
compute_rd ()
{
- int changed, passes;
- basic_block bb;
+ int bb, changed, passes;
- FOR_ALL_BB (bb)
- sbitmap_copy (rd_out[bb->sindex] /*dst*/, rd_gen[bb->sindex] /*src*/);
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ sbitmap_copy (rd_out[bb] /*dst*/, rd_gen[bb] /*src*/);
passes = 0;
changed = 1;
while (changed)
{
changed = 0;
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- sbitmap_union_of_preds (reaching_defs[bb->sindex], rd_out, bb->sindex);
- changed |= sbitmap_union_of_diff_cg (rd_out[bb->sindex], rd_gen[bb->sindex],
- reaching_defs[bb->sindex], rd_kill[bb->sindex]);
+ sbitmap_union_of_preds (reaching_defs[bb], rd_out, bb);
+ changed |= sbitmap_union_of_diff_cg (rd_out[bb], rd_gen[bb],
+ reaching_defs[bb], rd_kill[bb]);
}
passes++;
}
@@ -3069,16 +3067,16 @@ alloc_avail_expr_mem (n_blocks, n_exprs)
int n_blocks, n_exprs;
{
ae_kill = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_kill, n_blocks);
+ sbitmap_vector_zero (ae_kill, n_basic_blocks);
ae_gen = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_gen, n_blocks);
+ sbitmap_vector_zero (ae_gen, n_basic_blocks);
ae_in = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_in, n_blocks);
+ sbitmap_vector_zero (ae_in, n_basic_blocks);
ae_out = (sbitmap *) sbitmap_vector_alloc (n_blocks, n_exprs);
- sbitmap_vector_zero (ae_out, n_blocks);
+ sbitmap_vector_zero (ae_out, n_basic_blocks);
}
static void
@@ -3127,7 +3125,7 @@ expr_killed_p (x, bb)
switch (code)
{
case REG:
- return TEST_BIT (reg_set_in_block[bb->sindex], REGNO (x));
+ return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
case MEM:
if (load_killed_in_block_p (bb, get_max_uid () + 1, x, 0))
@@ -3178,20 +3176,20 @@ static void
compute_ae_kill (ae_gen, ae_kill)
sbitmap *ae_gen, *ae_kill;
{
- basic_block bb;
+ int bb;
unsigned int i;
struct expr *expr;
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
for (i = 0; i < expr_hash_table_size; i++)
for (expr = expr_hash_table[i]; expr; expr = expr->next_same_hash)
{
/* Skip EXPR if generated in this block. */
- if (TEST_BIT (ae_gen[bb->sindex], expr->bitmap_index))
+ if (TEST_BIT (ae_gen[bb], expr->bitmap_index))
continue;
- if (expr_killed_p (expr->expr, bb))
- SET_BIT (ae_kill[bb->sindex], expr->bitmap_index);
+ if (expr_killed_p (expr->expr, BASIC_BLOCK (bb)))
+ SET_BIT (ae_kill[bb], expr->bitmap_index);
}
}
@@ -3227,40 +3225,40 @@ expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited)
{
basic_block pred_bb = pred->src;
- if (visited[pred_bb->sindex])
+ if (visited[pred_bb->index])
/* This predecessor has already been visited. Nothing to do. */
;
else if (pred_bb == bb)
{
/* BB loops on itself. */
if (check_self_loop
- && TEST_BIT (ae_gen[pred_bb->sindex], expr->bitmap_index)
- && BLOCK_NUM (occr->insn) == pred_bb->sindex)
+ && TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index)
+ && BLOCK_NUM (occr->insn) == pred_bb->index)
return 1;
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
}
/* Ignore this predecessor if it kills the expression. */
- else if (TEST_BIT (ae_kill[pred_bb->sindex], expr->bitmap_index))
- visited[pred_bb->sindex] = 1;
+ else if (TEST_BIT (ae_kill[pred_bb->index], expr->bitmap_index))
+ visited[pred_bb->index] = 1;
/* Does this predecessor generate this expression? */
- else if (TEST_BIT (ae_gen[pred_bb->sindex], expr->bitmap_index))
+ else if (TEST_BIT (ae_gen[pred_bb->index], expr->bitmap_index))
{
/* Is this the occurrence we're looking for?
Note that there's only one generating occurrence per block
so we just need to check the block number. */
- if (BLOCK_NUM (occr->insn) == pred_bb->sindex)
+ if (BLOCK_NUM (occr->insn) == pred_bb->index)
return 1;
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
}
/* Neither gen nor kill. */
else
{
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
if (expr_reaches_here_p_work (occr, expr, pred_bb, check_self_loop,
visited))
@@ -3283,7 +3281,7 @@ expr_reaches_here_p (occr, expr, bb, check_self_loop)
int check_self_loop;
{
int rval;
- char *visited = (char *) xcalloc (last_basic_block, 1);
+ char *visited = (char *) xcalloc (n_basic_blocks, 1);
rval = expr_reaches_here_p_work (occr, expr, bb, check_self_loop, visited);
@@ -3607,21 +3605,20 @@ handle_avail_expr (insn, expr)
static int
classic_gcse ()
{
- int changed;
+ int bb, changed;
rtx insn;
- basic_block bb;
/* Note we start at block 1. */
changed = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
+ for (bb = 1; bb < n_basic_blocks; bb++)
{
/* Reset tables used to keep track of what's still valid [since the
start of the block]. */
reset_opr_set_tables ();
- for (insn = bb->head;
- insn != NULL && insn != NEXT_INSN (bb->end);
+ for (insn = BLOCK_HEAD (bb);
+ insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
insn = NEXT_INSN (insn))
{
/* Is insn of form (set (pseudo-reg) ...)? */
@@ -3639,7 +3636,7 @@ classic_gcse ()
&& ((expr = lookup_expr (src)) != NULL)
/* Is the expression available [at the start of the
block]? */
- && TEST_BIT (ae_in[bb->sindex], expr->bitmap_index)
+ && TEST_BIT (ae_in[bb], expr->bitmap_index)
/* Are the operands unchanged since the start of the
block? */
&& oprs_not_set_p (src, insn))
@@ -3670,7 +3667,7 @@ one_classic_gcse_pass (pass)
gcse_create_count = 0;
alloc_expr_hash_table (max_cuid);
- alloc_rd_mem (last_basic_block, max_cuid);
+ alloc_rd_mem (n_basic_blocks, max_cuid);
compute_expr_hash_table ();
if (gcse_file)
dump_hash_table (gcse_file, "Expression", expr_hash_table,
@@ -3680,7 +3677,7 @@ one_classic_gcse_pass (pass)
{
compute_kill_rd ();
compute_rd ();
- alloc_avail_expr_mem (last_basic_block, n_exprs);
+ alloc_avail_expr_mem (n_basic_blocks, n_exprs);
compute_ae_gen ();
compute_ae_kill (ae_gen, ae_kill);
compute_available (ae_gen, ae_kill, ae_out, ae_in);
@@ -3750,8 +3747,7 @@ compute_transp (x, indx, bmap, set_p)
sbitmap *bmap;
int set_p;
{
- int i, j;
- basic_block bb;
+ int bb, i, j;
enum rtx_code code;
reg_set *r;
const char *fmt;
@@ -3771,9 +3767,9 @@ compute_transp (x, indx, bmap, set_p)
{
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
{
- FOR_ALL_BB (bb)
- if (TEST_BIT (reg_set_in_block[bb->sindex], REGNO (x)))
- SET_BIT (bmap[bb->sindex], indx);
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
+ SET_BIT (bmap[bb], indx);
}
else
{
@@ -3785,9 +3781,9 @@ compute_transp (x, indx, bmap, set_p)
{
if (REGNO (x) < FIRST_PSEUDO_REGISTER)
{
- FOR_ALL_BB (bb)
- if (TEST_BIT (reg_set_in_block[bb->sindex], REGNO (x)))
- RESET_BIT (bmap[bb->sindex], indx);
+ for (bb = 0; bb < n_basic_blocks; bb++)
+ if (TEST_BIT (reg_set_in_block[bb], REGNO (x)))
+ RESET_BIT (bmap[bb], indx);
}
else
{
@@ -3799,9 +3795,9 @@ compute_transp (x, indx, bmap, set_p)
return;
case MEM:
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- rtx list_entry = canon_modify_mem_list[bb->sindex];
+ rtx list_entry = canon_modify_mem_list[bb];
while (list_entry)
{
@@ -3810,9 +3806,9 @@ compute_transp (x, indx, bmap, set_p)
if (GET_CODE (XEXP (list_entry, 0)) == CALL_INSN)
{
if (set_p)
- SET_BIT (bmap[bb->sindex], indx);
+ SET_BIT (bmap[bb], indx);
else
- RESET_BIT (bmap[bb->sindex], indx);
+ RESET_BIT (bmap[bb], indx);
break;
}
/* LIST_ENTRY must be an INSN of some kind that sets memory.
@@ -3826,9 +3822,9 @@ compute_transp (x, indx, bmap, set_p)
x, rtx_addr_varies_p))
{
if (set_p)
- SET_BIT (bmap[bb->sindex], indx);
+ SET_BIT (bmap[bb], indx);
else
- RESET_BIT (bmap[bb->sindex], indx);
+ RESET_BIT (bmap[bb], indx);
break;
}
list_entry = XEXP (list_entry, 1);
@@ -4292,25 +4288,24 @@ static int
cprop (alter_jumps)
int alter_jumps;
{
- int changed;
- basic_block bb;
+ int bb, changed;
rtx insn;
/* Note we start at block 1. */
changed = 0;
- FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR->next_bb->next_bb, EXIT_BLOCK_PTR, next_bb)
+ for (bb = 1; bb < n_basic_blocks; bb++)
{
/* Reset tables used to keep track of what's still valid [since the
start of the block]. */
reset_opr_set_tables ();
- for (insn = bb->head;
- insn != NULL && insn != NEXT_INSN (bb->head);
+ for (insn = BLOCK_HEAD (bb);
+ insn != NULL && insn != NEXT_INSN (BLOCK_END (bb));
insn = NEXT_INSN (insn))
if (INSN_P (insn))
{
- changed |= cprop_insn (bb, insn, alter_jumps);
+ changed |= cprop_insn (BASIC_BLOCK (bb), insn, alter_jumps);
/* Keep track of everything modified by this insn. */
/* ??? Need to be careful w.r.t. mods done to INSN. Don't
@@ -4347,7 +4342,7 @@ one_cprop_pass (pass, alter_jumps)
n_sets);
if (n_sets > 0)
{
- alloc_cprop_mem (last_basic_block, n_sets);
+ alloc_cprop_mem (n_basic_blocks, n_sets);
compute_cprop_data ();
changed = cprop (alter_jumps);
free_cprop_mem ();
@@ -4457,11 +4452,11 @@ static void
compute_pre_data ()
{
sbitmap trapping_expr;
- basic_block bb;
+ int i;
unsigned int ui;
compute_local_properties (transp, comp, antloc, 0);
- sbitmap_vector_zero (ae_kill, last_basic_block);
+ sbitmap_vector_zero (ae_kill, n_basic_blocks);
/* Collect expressions which might trap. */
trapping_expr = sbitmap_alloc (n_exprs);
@@ -4480,7 +4475,7 @@ compute_pre_data ()
This is significantly faster than compute_ae_kill. */
- FOR_ALL_BB (bb)
+ for (i = 0; i < n_basic_blocks; i++)
{
edge e;
@@ -4488,16 +4483,16 @@ compute_pre_data ()
kill all trapping expressions because we won't be able to properly
place the instruction on the edge. So make them neither
anticipatable nor transparent. This is fairly conservative. */
- for (e = bb->pred; e ; e = e->pred_next)
+ for (e = BASIC_BLOCK (i)->pred; e ; e = e->pred_next)
if (e->flags & EDGE_ABNORMAL)
{
- sbitmap_difference (antloc[bb->sindex], antloc[bb->sindex], trapping_expr);
- sbitmap_difference (transp[bb->sindex], transp[bb->sindex], trapping_expr);
+ sbitmap_difference (antloc[i], antloc[i], trapping_expr);
+ sbitmap_difference (transp[i], transp[i], trapping_expr);
break;
}
- sbitmap_a_or_b (ae_kill[bb->sindex], transp[bb->sindex], comp[bb->sindex]);
- sbitmap_not (ae_kill[bb->sindex], ae_kill[bb->sindex]);
+ sbitmap_a_or_b (ae_kill[i], transp[i], comp[i]);
+ sbitmap_not (ae_kill[i], ae_kill[i]);
}
edge_list = pre_edge_lcm (gcse_file, n_exprs, transp, comp, antloc,
@@ -4539,11 +4534,11 @@ pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
if (pred->src == ENTRY_BLOCK_PTR
/* Has this predecessor already been visited? */
- || visited[pred_bb->sindex])
+ || visited[pred_bb->index])
;/* Nothing to do. */
/* Does this predecessor generate this expression? */
- else if (TEST_BIT (comp[pred_bb->sindex], expr->bitmap_index))
+ else if (TEST_BIT (comp[pred_bb->index], expr->bitmap_index))
{
/* Is this the occurrence we're looking for?
Note that there's only one generating occurrence per block
@@ -4551,16 +4546,16 @@ pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited)
if (occr_bb == pred_bb)
return 1;
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
}
/* Ignore this predecessor if it kills the expression. */
- else if (! TEST_BIT (transp[pred_bb->sindex], expr->bitmap_index))
- visited[pred_bb->sindex] = 1;
+ else if (! TEST_BIT (transp[pred_bb->index], expr->bitmap_index))
+ visited[pred_bb->index] = 1;
/* Neither gen nor kill. */
else
{
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
if (pre_expr_reaches_here_p_work (occr_bb, expr, pred_bb, visited))
return 1;
}
@@ -4580,7 +4575,7 @@ pre_expr_reaches_here_p (occr_bb, expr, bb)
basic_block bb;
{
int rval;
- char *visited = (char *) xcalloc (last_basic_block, 1);
+ char *visited = (char *) xcalloc (n_basic_blocks, 1);
rval = pre_expr_reaches_here_p_work (occr_bb, expr, bb, visited);
@@ -4658,8 +4653,8 @@ insert_insn_end_bb (expr, bb, pre)
anywhere in the basic block with performing PRE optimizations.
Check this. */
if (GET_CODE (insn) == INSN && pre
- && !TEST_BIT (antloc[bb->sindex], expr->bitmap_index)
- && !TEST_BIT (transp[bb->sindex], expr->bitmap_index))
+ && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ && !TEST_BIT (transp[bb->index], expr->bitmap_index))
abort ();
/* If this is a jump table, then we can't insert stuff here. Since
@@ -4703,8 +4698,8 @@ insert_insn_end_bb (expr, bb, pre)
Check this. */
if (pre
- && !TEST_BIT (antloc[bb->sindex], expr->bitmap_index)
- && !TEST_BIT (transp[bb->sindex], expr->bitmap_index))
+ && !TEST_BIT (antloc[bb->index], expr->bitmap_index)
+ && !TEST_BIT (transp[bb->index], expr->bitmap_index))
abort ();
/* Since different machines initialize their parameter registers
@@ -4756,7 +4751,7 @@ insert_insn_end_bb (expr, bb, pre)
if (gcse_file)
{
fprintf (gcse_file, "PRE/HOIST: end of bb %d, insn %d, ",
- bb->sindex, INSN_UID (new_insn));
+ bb->index, INSN_UID (new_insn));
fprintf (gcse_file, "copying expression %d to reg %d\n",
expr->bitmap_index, regno);
}
@@ -4827,8 +4822,8 @@ pre_edge_insert (edge_list, index_map)
if (gcse_file)
{
fprintf (gcse_file, "PRE/HOIST: edge (%d,%d), ",
- bb->sindex,
- INDEX_EDGE_SUCC_BB (edge_list, e)->sindex);
+ bb->index,
+ INDEX_EDGE_SUCC_BB (edge_list, e)->index);
fprintf (gcse_file, "copy expression %d\n",
expr->bitmap_index);
}
@@ -4967,7 +4962,7 @@ pre_delete ()
rtx set;
basic_block bb = BLOCK_FOR_INSN (insn);
- if (TEST_BIT (pre_delete_map[bb->sindex], indx))
+ if (TEST_BIT (pre_delete_map[bb->index], indx))
{
set = single_set (insn);
if (! set)
@@ -5002,7 +4997,7 @@ pre_delete ()
"PRE: redundant insn %d (expression %d) in ",
INSN_UID (insn), indx);
fprintf (gcse_file, "bb %d, reaching reg is %d\n",
- bb->sindex, REGNO (expr->reaching_reg));
+ bb->index, REGNO (expr->reaching_reg));
}
}
}
@@ -5100,7 +5095,7 @@ one_pre_gcse_pass (pass)
if (n_exprs > 0)
{
- alloc_pre_mem (last_basic_block, n_exprs);
+ alloc_pre_mem (n_basic_blocks, n_exprs);
compute_pre_data ();
changed |= pre_gcse ();
free_edge_list (edge_list);
@@ -5184,18 +5179,18 @@ add_label_notes (x, insn)
static void
compute_transpout ()
{
- basic_block bb;
+ int bb;
unsigned int i;
struct expr *expr;
- sbitmap_vector_ones (transpout, last_basic_block);
+ sbitmap_vector_ones (transpout, n_basic_blocks);
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; ++bb)
{
/* Note that flow inserted a nop at the end of basic blocks that
end in call instructions for reasons other than abnormal
control flow. */
- if (GET_CODE (bb->end) != CALL_INSN)
+ if (GET_CODE (BLOCK_END (bb)) != CALL_INSN)
continue;
for (i = 0; i < expr_hash_table_size; i++)
@@ -5209,7 +5204,7 @@ compute_transpout ()
/* ??? Optimally, we would use interprocedural alias
analysis to determine if this mem is actually killed
by this call. */
- RESET_BIT (transpout[bb->sindex], expr->bitmap_index);
+ RESET_BIT (transpout[bb], expr->bitmap_index);
}
}
}
@@ -5242,8 +5237,8 @@ invalidate_nonnull_info (x, setter, data)
regno = REGNO (x) - npi->min_reg;
- RESET_BIT (npi->nonnull_local[npi->current_block->sindex], regno);
- SET_BIT (npi->nonnull_killed[npi->current_block->sindex], regno);
+ RESET_BIT (npi->nonnull_local[npi->current_block], regno);
+ SET_BIT (npi->nonnull_killed[npi->current_block], regno);
}
/* Do null-pointer check elimination for the registers indicated in
@@ -5258,7 +5253,8 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
sbitmap *nonnull_avout;
struct null_pointer_info *npi;
{
- basic_block bb, current_block;
+ int bb;
+ int current_block;
sbitmap *nonnull_local = npi->nonnull_local;
sbitmap *nonnull_killed = npi->nonnull_killed;
@@ -5270,10 +5266,10 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
Note that a register can have both properties in a single block. That
indicates that it's killed, then later in the block a new value is
computed. */
- sbitmap_vector_zero (nonnull_local, last_basic_block);
- sbitmap_vector_zero (nonnull_killed, last_basic_block);
+ sbitmap_vector_zero (nonnull_local, n_basic_blocks);
+ sbitmap_vector_zero (nonnull_killed, n_basic_blocks);
- FOR_ALL_BB (current_block)
+ for (current_block = 0; current_block < n_basic_blocks; current_block++)
{
rtx insn, stop_insn;
@@ -5282,8 +5278,8 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
/* Scan each insn in the basic block looking for memory references and
register sets. */
- stop_insn = NEXT_INSN (current_block->end);
- for (insn = current_block->head;
+ stop_insn = NEXT_INSN (BLOCK_END (current_block));
+ for (insn = BLOCK_HEAD (current_block);
insn != stop_insn;
insn = NEXT_INSN (insn))
{
@@ -5311,7 +5307,7 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
&& GET_CODE ((reg = XEXP (SET_SRC (set), 0))) == REG
&& REGNO (reg) >= npi->min_reg
&& REGNO (reg) < npi->max_reg)
- SET_BIT (nonnull_local[current_block->sindex],
+ SET_BIT (nonnull_local[current_block],
REGNO (reg) - npi->min_reg);
/* Now invalidate stuff clobbered by this insn. */
@@ -5324,7 +5320,7 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
&& GET_CODE ((reg = XEXP (SET_DEST (set), 0))) == REG
&& REGNO (reg) >= npi->min_reg
&& REGNO (reg) < npi->max_reg)
- SET_BIT (nonnull_local[current_block->sindex],
+ SET_BIT (nonnull_local[current_block],
REGNO (reg) - npi->min_reg);
}
}
@@ -5336,17 +5332,17 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
/* Now look at each bb and see if it ends with a compare of a value
against zero. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- rtx last_insn = bb->end;
+ rtx last_insn = BLOCK_END (bb);
rtx condition, earliest;
int compare_and_branch;
/* Since MIN_REG is always at least FIRST_PSEUDO_REGISTER, and
since BLOCK_REG[BB] is zero if this block did not end with a
comparison against zero, this condition works. */
- if (block_reg[bb->sindex] < npi->min_reg
- || block_reg[bb->sindex] >= npi->max_reg)
+ if (block_reg[bb] < npi->min_reg
+ || block_reg[bb] >= npi->max_reg)
continue;
/* LAST_INSN is a conditional jump. Get its condition. */
@@ -5357,7 +5353,7 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
continue;
/* Is the register known to have a nonzero value? */
- if (!TEST_BIT (nonnull_avout[bb->sindex], block_reg[bb->sindex] - npi->min_reg))
+ if (!TEST_BIT (nonnull_avout[bb], block_reg[bb] - npi->min_reg))
continue;
/* Try to compute whether the compare/branch at the loop end is one or
@@ -5385,12 +5381,12 @@ delete_null_pointer_checks_1 (block_reg, nonnull_avin,
delete_insn (last_insn);
if (compare_and_branch == 2)
delete_insn (earliest);
- purge_dead_edges (bb);
+ purge_dead_edges (BASIC_BLOCK (bb));
/* Don't check this block again. (Note that BLOCK_END is
invalid here; we deleted the last instruction in the
block.) */
- block_reg[bb->sindex] = 0;
+ block_reg[bb] = 0;
}
}
@@ -5424,14 +5420,14 @@ delete_null_pointer_checks (f)
{
sbitmap *nonnull_avin, *nonnull_avout;
unsigned int *block_reg;
- basic_block bb;
+ int bb;
int reg;
int regs_per_pass;
int max_reg;
struct null_pointer_info npi;
/* If we have only a single block, then there's nothing to do. */
- if (num_basic_blocks <= 1)
+ if (n_basic_blocks <= 1)
return;
/* Trying to perform global optimizations on flow graphs which have
@@ -5442,27 +5438,27 @@ delete_null_pointer_checks (f)
as blocks. But we do not want to punish small functions which have
a couple switch statements. So we require a relatively large number
of basic blocks and the ratio of edges to blocks to be high. */
- if (num_basic_blocks > 1000 && n_edges / num_basic_blocks >= 20)
+ if (n_basic_blocks > 1000 && n_edges / n_basic_blocks >= 20)
return;
/* We need four bitmaps, each with a bit for each register in each
basic block. */
max_reg = max_reg_num ();
- regs_per_pass = get_bitmap_width (4, last_basic_block, max_reg);
+ regs_per_pass = get_bitmap_width (4, n_basic_blocks, max_reg);
/* Allocate bitmaps to hold local and global properties. */
- npi.nonnull_local = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
- npi.nonnull_killed = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
- nonnull_avin = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
- nonnull_avout = sbitmap_vector_alloc (last_basic_block, regs_per_pass);
+ npi.nonnull_local = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
+ npi.nonnull_killed = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
+ nonnull_avin = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
+ nonnull_avout = sbitmap_vector_alloc (n_basic_blocks, regs_per_pass);
/* Go through the basic blocks, seeing whether or not each block
ends with a conditional branch whose condition is a comparison
against zero. Record the register compared in BLOCK_REG. */
- block_reg = (unsigned int *) xcalloc (last_basic_block, sizeof (int));
- FOR_ALL_BB (bb)
+ block_reg = (unsigned int *) xcalloc (n_basic_blocks, sizeof (int));
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- rtx last_insn = bb->end;
+ rtx last_insn = BLOCK_END (bb);
rtx condition, earliest, reg;
/* We only want conditional branches. */
@@ -5488,7 +5484,7 @@ delete_null_pointer_checks (f)
if (GET_CODE (reg) != REG)
continue;
- block_reg[bb->sindex] = REGNO (reg);
+ block_reg[bb] = REGNO (reg);
}
/* Go through the algorithm for each block of registers. */
@@ -5572,11 +5568,10 @@ free_code_hoist_mem ()
static void
compute_code_hoist_vbeinout ()
{
- int changed, passes;
- basic_block bb;
+ int bb, changed, passes;
- sbitmap_vector_zero (hoist_vbeout, last_basic_block);
- sbitmap_vector_zero (hoist_vbein, last_basic_block);
+ sbitmap_vector_zero (hoist_vbeout, n_basic_blocks);
+ sbitmap_vector_zero (hoist_vbein, n_basic_blocks);
passes = 0;
changed = 1;
@@ -5587,12 +5582,12 @@ compute_code_hoist_vbeinout ()
/* We scan the blocks in the reverse order to speed up
the convergence. */
- FOR_ALL_BB_REVERSE (bb)
+ for (bb = n_basic_blocks - 1; bb >= 0; bb--)
{
- changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb->sindex], antloc[bb->sindex],
- hoist_vbeout[bb->sindex], transp[bb->sindex]);
- if (bb->next_bb != EXIT_BLOCK_PTR)
- sbitmap_intersection_of_succs (hoist_vbeout[bb->sindex], hoist_vbein, bb->sindex);
+ changed |= sbitmap_a_or_b_and_c_cg (hoist_vbein[bb], antloc[bb],
+ hoist_vbeout[bb], transp[bb]);
+ if (bb != n_basic_blocks - 1)
+ sbitmap_intersection_of_succs (hoist_vbeout[bb], hoist_vbein, bb);
}
passes++;
@@ -5642,7 +5637,7 @@ hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
if (visited == NULL)
{
visited_allocated_locally = 1;
- visited = xcalloc (last_basic_block, 1);
+ visited = xcalloc (n_basic_blocks, 1);
}
for (pred = bb->pred; pred != NULL; pred = pred->pred_next)
@@ -5651,19 +5646,19 @@ hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
if (pred->src == ENTRY_BLOCK_PTR)
break;
- else if (visited[pred_bb->sindex])
+ else if (visited[pred_bb->index])
continue;
/* Does this predecessor generate this expression? */
- else if (TEST_BIT (comp[pred_bb->sindex], expr_index))
+ else if (TEST_BIT (comp[pred_bb->index], expr_index))
break;
- else if (! TEST_BIT (transp[pred_bb->sindex], expr_index))
+ else if (! TEST_BIT (transp[pred_bb->index], expr_index))
break;
/* Not killed. */
else
{
- visited[pred_bb->sindex] = 1;
+ visited[pred_bb->index] = 1;
if (! hoist_expr_reaches_here_p (expr_bb, expr_index,
pred_bb, visited))
break;
@@ -5680,12 +5675,12 @@ hoist_expr_reaches_here_p (expr_bb, expr_index, bb, visited)
static void
hoist_code ()
{
- basic_block bb, dominated;
+ int bb, dominated;
unsigned int i;
struct expr **index_map;
struct expr *expr;
- sbitmap_vector_zero (hoist_exprs, last_basic_block);
+ sbitmap_vector_zero (hoist_exprs, n_basic_blocks);
/* Compute a mapping from expression number (`bitmap_index') to
hash table entry. */
@@ -5697,34 +5692,33 @@ hoist_code ()
/* Walk over each basic block looking for potentially hoistable
expressions; nothing gets hoisted from the entry block. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
int found = 0;
int insn_inserted_p;
/* Examine each expression that is very busy at the exit of this
block. These are the potentially hoistable expressions. */
- for (i = 0; i < hoist_vbeout[bb->sindex]->n_bits; i++)
+ for (i = 0; i < hoist_vbeout[bb]->n_bits; i++)
{
int hoistable = 0;
- if (TEST_BIT (hoist_vbeout[bb->sindex], i)
- && TEST_BIT (transpout[bb->sindex], i))
+ if (TEST_BIT (hoist_vbeout[bb], i) && TEST_BIT (transpout[bb], i))
{
/* We've found a potentially hoistable expression, now
we look at every block BB dominates to see if it
computes the expression. */
- FOR_ALL_BB (dominated)
+ for (dominated = 0; dominated < n_basic_blocks; dominated++)
{
/* Ignore self dominance. */
if (bb == dominated
- || ! TEST_BIT (dominators[dominated->sindex], bb->sindex))
+ || ! TEST_BIT (dominators[dominated], bb))
continue;
/* We've found a dominated block, now see if it computes
the busy expression and whether or not moving that
expression to the "beginning" of that block is safe. */
- if (!TEST_BIT (antloc[dominated->sindex], i))
+ if (!TEST_BIT (antloc[dominated], i))
continue;
/* Note if the expression would reach the dominated block
@@ -5732,7 +5726,8 @@ hoist_code ()
Keep track of how many times this expression is hoistable
from a dominated block into BB. */
- if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
+ if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
+ BASIC_BLOCK (dominated), NULL))
hoistable++;
}
@@ -5748,7 +5743,7 @@ hoist_code ()
to nullify any benefit we get from code hoisting. */
if (hoistable > 1)
{
- SET_BIT (hoist_exprs[bb->sindex], i);
+ SET_BIT (hoist_exprs[bb], i);
found = 1;
}
}
@@ -5759,29 +5754,29 @@ hoist_code ()
continue;
/* Loop over all the hoistable expressions. */
- for (i = 0; i < hoist_exprs[bb->sindex]->n_bits; i++)
+ for (i = 0; i < hoist_exprs[bb]->n_bits; i++)
{
/* We want to insert the expression into BB only once, so
note when we've inserted it. */
insn_inserted_p = 0;
/* These tests should be the same as the tests above. */
- if (TEST_BIT (hoist_vbeout[bb->sindex], i))
+ if (TEST_BIT (hoist_vbeout[bb], i))
{
/* We've found a potentially hoistable expression, now
we look at every block BB dominates to see if it
computes the expression. */
- FOR_ALL_BB (dominated)
+ for (dominated = 0; dominated < n_basic_blocks; dominated++)
{
/* Ignore self dominance. */
if (bb == dominated
- || ! TEST_BIT (dominators[dominated->sindex], bb->sindex))
+ || ! TEST_BIT (dominators[dominated], bb))
continue;
/* We've found a dominated block, now see if it computes
the busy expression and whether or not moving that
expression to the "beginning" of that block is safe. */
- if (!TEST_BIT (antloc[dominated->sindex], i))
+ if (!TEST_BIT (antloc[dominated], i))
continue;
/* The expression is computed in the dominated block and
@@ -5789,7 +5784,8 @@ hoist_code ()
dominated block. Now we have to determine if the
expression would reach the dominated block if it was
placed at the end of BB. */
- if (hoist_expr_reaches_here_p (bb, i, dominated, NULL))
+ if (hoist_expr_reaches_here_p (BASIC_BLOCK (bb), i,
+ BASIC_BLOCK (dominated), NULL))
{
struct expr *expr = index_map[i];
struct occr *occr = expr->antic_occr;
@@ -5797,7 +5793,7 @@ hoist_code ()
rtx set;
/* Find the right occurrence of this expression. */
- while (BLOCK_FOR_INSN (occr->insn) != dominated && occr)
+ while (BLOCK_NUM (occr->insn) != dominated && occr)
occr = occr->next;
/* Should never happen. */
@@ -5831,7 +5827,8 @@ hoist_code ()
occr->deleted_p = 1;
if (!insn_inserted_p)
{
- insert_insn_end_bb (index_map[i], bb, 0);
+ insert_insn_end_bb (index_map[i],
+ BASIC_BLOCK (bb), 0);
insn_inserted_p = 1;
}
}
@@ -5861,7 +5858,7 @@ one_code_hoisting_pass ()
if (n_exprs > 0)
{
- alloc_code_hoist_mem (last_basic_block, n_exprs);
+ alloc_code_hoist_mem (n_basic_blocks, n_exprs);
compute_code_hoist_data ();
hoist_code ();
free_code_hoist_mem ();
@@ -6111,15 +6108,15 @@ static void
compute_ld_motion_mems ()
{
struct ls_expr * ptr;
- basic_block bb;
+ int bb;
rtx insn;
pre_ldst_mems = NULL;
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- for (insn = bb->head;
- insn && insn != NEXT_INSN (bb->end);
+ for (insn = BLOCK_HEAD (bb);
+ insn && insn != NEXT_INSN (BLOCK_END (bb));
insn = NEXT_INSN (insn))
{
if (GET_RTX_CLASS (GET_CODE (insn)) == 'i')
@@ -6337,7 +6334,7 @@ store_ops_ok (x, bb)
case REG:
/* If a reg has changed after us in this
block, the operand has been killed. */
- return TEST_BIT (reg_set_in_block[bb->sindex], REGNO (x));
+ return TEST_BIT (reg_set_in_block[bb->index], REGNO (x));
case MEM:
x = XEXP (x, 0);
@@ -6436,24 +6433,23 @@ find_moveable_store (insn)
static int
compute_store_table ()
{
- int ret;
- basic_block bb;
+ int bb, ret;
unsigned regno;
rtx insn, pat;
max_gcse_regno = max_reg_num ();
- reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (last_basic_block,
+ reg_set_in_block = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks,
max_gcse_regno);
- sbitmap_vector_zero (reg_set_in_block, last_basic_block);
+ sbitmap_vector_zero (reg_set_in_block, n_basic_blocks);
pre_ldst_mems = 0;
/* Find all the stores we care about. */
- FOR_ALL_BB (bb)
+ for (bb = 0; bb < n_basic_blocks; bb++)
{
- regvec = & (reg_set_in_block[bb->sindex]);
- for (insn = bb->end;
- insn && insn != PREV_INSN (bb->end);
+ regvec = & (reg_set_in_block[bb]);
+ for (insn = BLOCK_END (bb);
+ insn && insn != PREV_INSN (BLOCK_HEAD (bb));
insn = PREV_INSN (insn))
{
/* Ignore anything that is not a normal insn. */
@@ -6472,7 +6468,7 @@ compute_store_table ()
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (clobbers_all
|| TEST_HARD_REG_BIT (regs_invalidated_by_call, regno))
- SET_BIT (reg_set_in_block[bb->sindex], regno);
+ SET_BIT (reg_set_in_block[bb], regno);
}
pat = PATTERN (insn);
@@ -6638,17 +6634,18 @@ store_killed_before (x, insn, bb)
static void
build_store_vectors ()
{
- basic_block bb, b;
+ basic_block bb;
+ int b;
rtx insn, st;
struct ls_expr * ptr;
/* Build the gen_vector. This is any store in the table which is not killed
by aliasing later in its block. */
- ae_gen = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
- sbitmap_vector_zero (ae_gen, last_basic_block);
+ ae_gen = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
+ sbitmap_vector_zero (ae_gen, n_basic_blocks);
- st_antloc = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
- sbitmap_vector_zero (st_antloc, last_basic_block);
+ st_antloc = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
+ sbitmap_vector_zero (st_antloc, n_basic_blocks);
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
{
@@ -6669,7 +6666,7 @@ build_store_vectors ()
the block), and replace it with this one). We'll copy the
old SRC expression to an unused register in case there
are any side effects. */
- if (TEST_BIT (ae_gen[bb->sindex], ptr->index))
+ if (TEST_BIT (ae_gen[bb->index], ptr->index))
{
/* Find previous store. */
rtx st;
@@ -6686,7 +6683,7 @@ build_store_vectors ()
continue;
}
}
- SET_BIT (ae_gen[bb->sindex], ptr->index);
+ SET_BIT (ae_gen[bb->index], ptr->index);
AVAIL_STORE_LIST (ptr) = alloc_INSN_LIST (insn,
AVAIL_STORE_LIST (ptr));
}
@@ -6703,16 +6700,16 @@ build_store_vectors ()
free_INSN_LIST_list (&store_list);
}
- ae_kill = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
- sbitmap_vector_zero (ae_kill, last_basic_block);
+ ae_kill = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
+ sbitmap_vector_zero (ae_kill, n_basic_blocks);
- transp = (sbitmap *) sbitmap_vector_alloc (last_basic_block, num_stores);
- sbitmap_vector_zero (transp, last_basic_block);
+ transp = (sbitmap *) sbitmap_vector_alloc (n_basic_blocks, num_stores);
+ sbitmap_vector_zero (transp, n_basic_blocks);
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
- FOR_ALL_BB (b)
+ for (b = 0; b < n_basic_blocks; b++)
{
- if (store_killed_after (ptr->pattern, b->head, b))
+ if (store_killed_after (ptr->pattern, BLOCK_HEAD (b), BASIC_BLOCK (b)))
{
/* The anticipatable expression is not killed if it's gen'd. */
/*
@@ -6730,10 +6727,10 @@ build_store_vectors ()
If we always kill it in this case, we'll sometimes do
unnecessary work, but it shouldn't actually hurt anything.
if (!TEST_BIT (ae_gen[b], ptr->index)). */
- SET_BIT (ae_kill[b->sindex], ptr->index);
+ SET_BIT (ae_kill[b], ptr->index);
}
else
- SET_BIT (transp[b->sindex], ptr->index);
+ SET_BIT (transp[b], ptr->index);
}
/* Any block with no exits calls some non-returning function, so
@@ -6744,10 +6741,10 @@ build_store_vectors ()
{
fprintf (gcse_file, "ST_avail and ST_antic (shown under loads..)\n");
print_ldst_list (gcse_file);
- dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, last_basic_block);
- dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, last_basic_block);
- dump_sbitmap_vector (gcse_file, "Transpt", "", transp, last_basic_block);
- dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, last_basic_block);
+ dump_sbitmap_vector (gcse_file, "st_antloc", "", st_antloc, n_basic_blocks);
+ dump_sbitmap_vector (gcse_file, "st_kill", "", ae_kill, n_basic_blocks);
+ dump_sbitmap_vector (gcse_file, "Transpt", "", transp, n_basic_blocks);
+ dump_sbitmap_vector (gcse_file, "st_avloc", "", ae_gen, n_basic_blocks);
}
}
@@ -6779,7 +6776,7 @@ insert_insn_start_bb (insn, bb)
if (gcse_file)
{
fprintf (gcse_file, "STORE_MOTION insert store at start of BB %d:\n",
- bb->sindex);
+ bb->index);
print_inline_rtx (gcse_file, insn, 6);
fprintf (gcse_file, "\n");
}
@@ -6845,7 +6842,7 @@ insert_store (expr, e)
if (gcse_file)
{
fprintf (gcse_file, "STORE_MOTION insert insn on edge (%d, %d):\n",
- e->src->sindex, e->dest->sindex);
+ e->src->index, e->dest->index);
print_inline_rtx (gcse_file, insn, 6);
fprintf (gcse_file, "\n");
}
@@ -6868,7 +6865,7 @@ replace_store_insn (reg, del, bb)
if (gcse_file)
{
fprintf (gcse_file,
- "STORE_MOTION delete insn in BB %d:\n ", bb->sindex);
+ "STORE_MOTION delete insn in BB %d:\n ", bb->index);
print_inline_rtx (gcse_file, del, 6);
fprintf (gcse_file, "\nSTORE MOTION replaced with insn:\n ");
print_inline_rtx (gcse_file, insn, 6);
@@ -6942,8 +6939,7 @@ free_store_memory ()
static void
store_motion ()
{
- basic_block x;
- int y;
+ int x;
struct ls_expr * ptr;
int update_flow = 0;
@@ -6976,13 +6972,13 @@ store_motion ()
/* Now we want to insert the new stores which are going to be needed. */
for (ptr = first_ls_expr (); ptr != NULL; ptr = next_ls_expr (ptr))
{
- FOR_ALL_BB (x)
- if (TEST_BIT (pre_delete_map[x->sindex], ptr->index))
- delete_store (ptr, x);
+ for (x = 0; x < n_basic_blocks; x++)
+ if (TEST_BIT (pre_delete_map[x], ptr->index))
+ delete_store (ptr, BASIC_BLOCK (x));
- for (y = 0; y < NUM_EDGES (edge_list); y++)
- if (TEST_BIT (pre_insert_map[y], ptr->index))
- update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, y));
+ for (x = 0; x < NUM_EDGES (edge_list); x++)
+ if (TEST_BIT (pre_insert_map[x], ptr->index))
+ update_flow |= insert_store (ptr, INDEX_EDGE (edge_list, x));
}
if (update_flow)
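Nearly every hunk above repeats one substitution: per-block bitmap vectors and scratch arrays go back to being sized by `n_basic_blocks` rather than `last_basic_block`. The two bounds coincide only while block numbers stay contiguous, which is what the reverted renumbering-removal patches had given up. Below is a hedged, self-contained illustration of why the sparse-numbering scheme needed the larger bound (all values invented for the example):

```c
#include <assert.h>
#include <stdlib.h>

int main (void)
{
  /* Example values: blocks 0, 1, 3, 4 are live; block 2 was deleted
     without renumbering the survivors.  */
  int n_basic_blocks = 4;      /* count of live blocks                */
  int last_basic_block = 5;    /* one past the highest index in use   */
  int live_index[] = { 0, 1, 3, 4 };
  int i;

  /* The reverted code had to size per-block arrays by last_basic_block,
     because a live block's number can exceed the live-block count.  */
  char *visited = calloc (last_basic_block, 1);
  for (i = 0; i < 4; i++)
    {
      assert (live_index[i] < last_basic_block);   /* always holds */
      /* An array of only n_basic_blocks entries would be overrun
         here for blocks 3 and 4.  */
      visited[live_index[i]] = 1;
    }
  free (visited);

  /* After the revert, numbering is contiguous again, so
     n_basic_blocks == last_basic_block and the simpler bound works.  */
  return 0;
}
```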