path: root/gcc/lcm.c
author     Trevor Saunders <tbsaunde+gcc@tbsaunde.org>  2017-05-14 00:39:23 +0000
committer  Trevor Saunders <tbsaunde@gcc.gnu.org>       2017-05-14 00:39:23 +0000
commit     6fa95e0961bc15efa5ff52fc7358aee78a16a33c (patch)
tree       cb4fb40255dee04a4d085d4a1614b8d8b295cd27 /gcc/lcm.c
parent     35bfaf4d537dbf181575c9568a54da33d45a30ad (diff)
make inverted_post_order_compute() operate on a vec
gcc/ChangeLog:

2017-05-13  Trevor Saunders  <tbsaunde+gcc@tbsaunde.org>

	* cfganal.c (inverted_post_order_compute): Change argument type
	to vec *.
	* cfganal.h (inverted_post_order_compute): Adjust prototype.
	* df-core.c (rest_of_handle_df_initialize): Adjust.
	(rest_of_handle_df_finish): Likewise.
	(df_analyze_1): Likewise.
	(df_analyze): Likewise.
	(loop_inverted_post_order_compute): Change argument to be a vec *.
	(df_analyze_loop): Adjust.
	(df_get_n_blocks): Likewise.
	(df_get_postorder): Likewise.
	* df.h (struct df_d): Change field to be a vec.
	* lcm.c (compute_laterin): Adjust.
	(compute_available): Likewise.
	* lra-lives.c (lra_create_live_ranges_1): Likewise.
	* tree-ssa-dce.c (remove_dead_stmt): Likewise.
	* tree-ssa-pre.c (compute_antic): Likewise.

From-SVN: r248027
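The calling-convention change the ChangeLog describes is what the lcm.c hunks below show: previously the caller allocated a worst-case int array with XNEWVEC and inverted_post_order_compute returned the number of entries it wrote; now the caller passes a vec by pointer (here an auto_vec<int, 20>) and reads the count from the vector itself. The standalone sketch below only mirrors that pattern: compute_order_old/compute_order_new are hypothetical stand-ins for inverted_post_order_compute, and std::vector stands in for GCC's vec/auto_vec, so treat it as an illustrative analogue rather than GCC code.

// Illustrative analogue, not GCC code: compute_order_old/compute_order_new
// are hypothetical stand-ins for inverted_post_order_compute, and
// std::vector stands in for GCC's vec/auto_vec.
#include <cstdlib>
#include <vector>

/* Old convention: the caller supplies a worst-case array, the callee
   returns how many entries it actually wrote.  */
static int
compute_order_old (int *order, int max_blocks)
{
  int n = 0;
  for (int bb = max_blocks - 1; bb >= 0; --bb)
    order[n++] = bb;		/* placeholder traversal */
  return n;
}

/* New convention: the callee appends to a vector passed by pointer, so
   the caller needs neither a size estimate nor an explicit free.  */
static void
compute_order_new (std::vector<int> *order, int max_blocks)
{
  for (int bb = max_blocks - 1; bb >= 0; --bb)
    order->push_back (bb);	/* placeholder traversal */
}

int
main ()
{
  const int max_blocks = 8;
  long sum = 0;

  /* Old style, matching the removed lines of the patch.  */
  int *order = (int *) malloc (max_blocks * sizeof (int));
  int n = compute_order_old (order, max_blocks);
  for (int i = 0; i < n; ++i)
    sum += order[i];		/* visit order[i] */
  free (order);

  /* New style, matching the added lines of the patch.  */
  std::vector<int> order2;
  compute_order_new (&order2, max_blocks);
  for (unsigned int i = 0; i < order2.size (); ++i)
    sum += order2[i];		/* visit order2[i] */

  return sum == 2 * (0 + 1 + 2 + 3 + 4 + 5 + 6 + 7) ? 0 : 1;
}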
Diffstat (limited to 'gcc/lcm.c')
-rw-r--r--  gcc/lcm.c | 14
1 file changed, 6 insertions, 8 deletions
diff --git a/gcc/lcm.c b/gcc/lcm.c
index edc86b5..e866627 100644
--- a/gcc/lcm.c
+++ b/gcc/lcm.c
@@ -270,9 +270,9 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
/* Add all the blocks to the worklist. This prevents an early exit from
the loop given our optimistic initialization of LATER above. */
- int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
- int postorder_num = inverted_post_order_compute (postorder);
- for (int i = 0; i < postorder_num; ++i)
+ auto_vec<int, 20> postorder;
+ inverted_post_order_compute (&postorder);
+ for (unsigned int i = 0; i < postorder.length (); ++i)
{
bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
@@ -281,7 +281,6 @@ compute_laterin (struct edge_list *edge_list, sbitmap *earliest,
*qin++ = bb;
bb->aux = bb;
}
- free (postorder);
/* Note that we do not use the last allocated element for our queue,
as EXIT_BLOCK is never inserted into it. */
@@ -512,9 +511,9 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
/* Put every block on the worklist; this is necessary because of the
optimistic initialization of AVOUT above. Use inverted postorder
to make the dataflow problem require less iterations. */
- int *postorder = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
- int postorder_num = inverted_post_order_compute (postorder);
- for (int i = 0; i < postorder_num; ++i)
+ auto_vec<int, 20> postorder;
+ inverted_post_order_compute (&postorder);
+ for (unsigned int i = 0; i < postorder.length (); ++i)
{
bb = BASIC_BLOCK_FOR_FN (cfun, postorder[i]);
if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
@@ -523,7 +522,6 @@ compute_available (sbitmap *avloc, sbitmap *kill, sbitmap *avout,
*qin++ = bb;
bb->aux = bb;
}
- free (postorder);
qin = worklist;
qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS];
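Both hunks also drop the trailing free (postorder): as I understand gcc/vec.h, auto_vec<int, 20> owns its storage, keeping the first 20 elements inside the object and releasing any heap spill in its destructor when the variable leaves scope. The minimal sketch below illustrates that idea with a hypothetical tiny_int_vec type; it is an assumption-level illustration of the ownership pattern, not GCC's actual implementation.

// Minimal sketch (hypothetical tiny_int_vec, not GCC's auto_vec): a vector
// with small inline storage that frees any heap spill in its destructor,
// which is why the callers above no longer need an explicit free ().
#include <cstdlib>
#include <cstring>

struct tiny_int_vec
{
  int inline_buf[20];		/* inline storage, like auto_vec<int, 20> */
  int *data = inline_buf;
  unsigned len = 0, cap = 20;

  void push (int v)
  {
    if (len == cap)
      {
	/* Spill to the heap once the inline buffer is exhausted.  */
	cap *= 2;
	int *heap = (int *) malloc (cap * sizeof (int));
	memcpy (heap, data, len * sizeof (int));
	if (data != inline_buf)
	  free (data);
	data = heap;
      }
    data[len++] = v;
  }

  ~tiny_int_vec ()
  {
    /* Heap storage, if any, is reclaimed automatically here.  */
    if (data != inline_buf)
      free (data);
  }
};

int
main ()
{
  tiny_int_vec order;
  for (int i = 0; i < 100; ++i)
    order.push (i);
  /* No free () needed: the destructor runs when ORDER goes out of scope.  */
  return 0;
}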