about summary refs log tree commit diff
path: root/gcc/regcprop.c
diff options
context:
space:
mode:
author    Trevor Saunders <tsaunders@mozilla.com> 2014-04-17 12:37:34 +0000
committer Trevor Saunders <tbsaunde@gcc.gnu.org> 2014-04-17 12:37:34 +0000
commit   be55bfe6cf456943b12fe128f8a445b583ace36f (patch)
tree     2c7de59d1f6572c580defbe0ccac2d0b83cd1eb3 /gcc/regcprop.c
parent   1a3d085cf2a0caa5daef7c0443b1d280bcef295e (diff)
download gcc-be55bfe6cf456943b12fe128f8a445b583ace36f.zip
gcc-be55bfe6cf456943b12fe128f8a445b583ace36f.tar.gz
gcc-be55bfe6cf456943b12fe128f8a445b583ace36f.tar.bz2
pass cfun to pass::execute
gcc/ * passes.c (opt_pass::execute): Adjust. (pass_manager::execute_pass_mode_switching): Likewise. (early_local_passes::execute): Likewise. (execute_one_pass): Pass cfun to the pass's execute method. * tree-pass.h (opt_pass::execute): Add function * argument. * asan.c, auto-inc-dec.c, bb-reorder.c, bt-load.c, cfgcleanup.c, cfgexpand.c, cfgrtl.c, cgraphbuild.c, combine-stack-adj.c, combine.c, compare-elim.c, config/arc/arc.c, config/epiphany/mode-switch-use.c, config/epiphany/resolve-sw-modes.c, config/i386/i386.c, config/mips/mips.c, config/rl78/rl78.c, config/s390/s390.c, config/sparc/sparc.c, cprop.c, dce.c, df-core.c, dse.c, dwarf2cfi.c, except.c, final.c, function.c, fwprop.c, gcse.c, gimple-low.c, gimple-ssa-isolate-paths.c, gimple-ssa-strength-reduction.c, graphite.c, ifcvt.c, init-regs.c, ipa-cp.c, ipa-devirt.c, ipa-inline-analysis.c, ipa-inline.c, ipa-profile.c, ipa-pure-const.c, ipa-reference.c, ipa-split.c, ipa.c, ira.c, jump.c, loop-init.c, lower-subreg.c, mode-switching.c, omp-low.c, postreload-gcse.c, postreload.c, predict.c, recog.c, ree.c, reg-stack.c, regcprop.c, reginfo.c, regrename.c, reorg.c, sched-rgn.c, stack-ptr-mod.c, store-motion.c, tracer.c, trans-mem.c, tree-call-cdce.c, tree-cfg.c, tree-cfgcleanup.c, tree-complex.c, tree-eh.c, tree-emutls.c, tree-if-conv.c, tree-into-ssa.c, tree-loop-distribution.c, tree-nrv.c, tree-object-size.c, tree-parloops.c, tree-predcom.c, tree-ssa-ccp.c, tree-ssa-copy.c, tree-ssa-copyrename.c, tree-ssa-dce.c, tree-ssa-dom.c, tree-ssa-dse.c, tree-ssa-forwprop.c, tree-ssa-ifcombine.c, tree-ssa-loop-ch.c, tree-ssa-loop-im.c, tree-ssa-loop-ivcanon.c, tree-ssa-loop-prefetch.c, tree-ssa-loop-unswitch.c, tree-ssa-loop.c, tree-ssa-math-opts.c, tree-ssa-phiopt.c, tree-ssa-phiprop.c, tree-ssa-pre.c, tree-ssa-reassoc.c, tree-ssa-sink.c, tree-ssa-strlen.c, tree-ssa-structalias.c, tree-ssa-uncprop.c, tree-ssa-uninit.c, tree-ssa.c, tree-ssanames.c, tree-stdarg.c, tree-switch-conversion.c, tree-tailcall.c, tree-vect-generic.c, 
tree-vectorizer.c, tree-vrp.c, tree.c, tsan.c, ubsan.c, var-tracking.c, vtable-verify.c, web.c: Adjust. From-SVN: r209482
Diffstat (limited to 'gcc/regcprop.c')
-rw-r--r--  gcc/regcprop.c  174
1 files changed, 86 insertions, 88 deletions
diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index 24992e4..c1fbb65 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -1056,93 +1056,6 @@ copyprop_hardreg_forward_1 (basic_block bb, struct value_data *vd)
return anything_changed;
}
-/* Main entry point for the forward copy propagation optimization. */
-
-static unsigned int
-copyprop_hardreg_forward (void)
-{
- struct value_data *all_vd;
- basic_block bb;
- sbitmap visited;
- bool analyze_called = false;
-
- all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (cfun));
-
- visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
- bitmap_clear (visited);
-
- if (MAY_HAVE_DEBUG_INSNS)
- debug_insn_changes_pool
- = create_alloc_pool ("debug insn changes pool",
- sizeof (struct queued_debug_insn_change), 256);
-
- FOR_EACH_BB_FN (bb, cfun)
- {
- bitmap_set_bit (visited, bb->index);
-
- /* If a block has a single predecessor, that we've already
- processed, begin with the value data that was live at
- the end of the predecessor block. */
- /* ??? Ought to use more intelligent queuing of blocks. */
- if (single_pred_p (bb)
- && bitmap_bit_p (visited, single_pred (bb)->index)
- && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
- {
- all_vd[bb->index] = all_vd[single_pred (bb)->index];
- if (all_vd[bb->index].n_debug_insn_changes)
- {
- unsigned int regno;
-
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- {
- if (all_vd[bb->index].e[regno].debug_insn_changes)
- {
- all_vd[bb->index].e[regno].debug_insn_changes = NULL;
- if (--all_vd[bb->index].n_debug_insn_changes == 0)
- break;
- }
- }
- }
- }
- else
- init_value_data (all_vd + bb->index);
-
- copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
- }
-
- if (MAY_HAVE_DEBUG_INSNS)
- {
- FOR_EACH_BB_FN (bb, cfun)
- if (bitmap_bit_p (visited, bb->index)
- && all_vd[bb->index].n_debug_insn_changes)
- {
- unsigned int regno;
- bitmap live;
-
- if (!analyze_called)
- {
- df_analyze ();
- analyze_called = true;
- }
- live = df_get_live_out (bb);
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- if (all_vd[bb->index].e[regno].debug_insn_changes)
- {
- if (REGNO_REG_SET_P (live, regno))
- apply_debug_insn_changes (all_vd + bb->index, regno);
- if (all_vd[bb->index].n_debug_insn_changes == 0)
- break;
- }
- }
-
- free_alloc_pool (debug_insn_changes_pool);
- }
-
- sbitmap_free (visited);
- free (all_vd);
- return 0;
-}
-
/* Dump the value chain data to stderr. */
DEBUG_FUNCTION void
@@ -1276,10 +1189,95 @@ public:
return (optimize > 0 && (flag_cprop_registers));
}
- unsigned int execute () { return copyprop_hardreg_forward (); }
+ virtual unsigned int execute (function *);
}; // class pass_cprop_hardreg
+unsigned int
+pass_cprop_hardreg::execute (function *fun)
+{
+ struct value_data *all_vd;
+ basic_block bb;
+ sbitmap visited;
+ bool analyze_called = false;
+
+ all_vd = XNEWVEC (struct value_data, last_basic_block_for_fn (fun));
+
+ visited = sbitmap_alloc (last_basic_block_for_fn (fun));
+ bitmap_clear (visited);
+
+ if (MAY_HAVE_DEBUG_INSNS)
+ debug_insn_changes_pool
+ = create_alloc_pool ("debug insn changes pool",
+ sizeof (struct queued_debug_insn_change), 256);
+
+ FOR_EACH_BB_FN (bb, fun)
+ {
+ bitmap_set_bit (visited, bb->index);
+
+ /* If a block has a single predecessor, that we've already
+ processed, begin with the value data that was live at
+ the end of the predecessor block. */
+ /* ??? Ought to use more intelligent queuing of blocks. */
+ if (single_pred_p (bb)
+ && bitmap_bit_p (visited, single_pred (bb)->index)
+ && ! (single_pred_edge (bb)->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)))
+ {
+ all_vd[bb->index] = all_vd[single_pred (bb)->index];
+ if (all_vd[bb->index].n_debug_insn_changes)
+ {
+ unsigned int regno;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ if (all_vd[bb->index].e[regno].debug_insn_changes)
+ {
+ all_vd[bb->index].e[regno].debug_insn_changes = NULL;
+ if (--all_vd[bb->index].n_debug_insn_changes == 0)
+ break;
+ }
+ }
+ }
+ }
+ else
+ init_value_data (all_vd + bb->index);
+
+ copyprop_hardreg_forward_1 (bb, all_vd + bb->index);
+ }
+
+ if (MAY_HAVE_DEBUG_INSNS)
+ {
+ FOR_EACH_BB_FN (bb, fun)
+ if (bitmap_bit_p (visited, bb->index)
+ && all_vd[bb->index].n_debug_insn_changes)
+ {
+ unsigned int regno;
+ bitmap live;
+
+ if (!analyze_called)
+ {
+ df_analyze ();
+ analyze_called = true;
+ }
+ live = df_get_live_out (bb);
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (all_vd[bb->index].e[regno].debug_insn_changes)
+ {
+ if (REGNO_REG_SET_P (live, regno))
+ apply_debug_insn_changes (all_vd + bb->index, regno);
+ if (all_vd[bb->index].n_debug_insn_changes == 0)
+ break;
+ }
+ }
+
+ free_alloc_pool (debug_insn_changes_pool);
+ }
+
+ sbitmap_free (visited);
+ free (all_vd);
+ return 0;
+}
+
} // anon namespace
rtl_opt_pass *