about summary refs log tree commit diff
path: root/gcc/sched-deps.c
diff options
context:
space:
mode:
authorRichard Henderson <rth@redhat.com>2011-08-12 14:00:00 -0700
committerRichard Henderson <rth@gcc.gnu.org>2011-08-12 14:00:00 -0700
commit7b4ef66246ab45928f337bde3d509a28352ebfb1 (patch)
treed173152d41fb9553ecaf105fbe4cff33fa1e10b5 /gcc/sched-deps.c
parentc789cb7756423b73379c821307c7005a14ab8adc (diff)
downloadgcc-7b4ef66246ab45928f337bde3d509a28352ebfb1.zip
gcc-7b4ef66246ab45928f337bde3d509a28352ebfb1.tar.gz
gcc-7b4ef66246ab45928f337bde3d509a28352ebfb1.tar.bz2
re PR rtl-optimization/49994 (ICE: in maybe_record_trace_start, at dwarf2cfi.c:2234 with -fsched2-use-superblocks)
PR rtl-opt/49994 * sched-int.h (struct deps_desc): Add sched_before_next_jump. * sched-deps.c (init_deps): Clear it. (deps_analyze_insn): Consume it. (sched_analyze_insn): Fill it. From-SVN: r177721
Diffstat (limited to 'gcc/sched-deps.c')
-rw-r--r--gcc/sched-deps.c25
1 file changed, 23 insertions, 2 deletions
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 2961cca..ed592c8 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -2696,6 +2696,18 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
add_dependence_list (insn, deps->last_function_call_may_noreturn,
1, REG_DEP_ANTI);
+ /* We must avoid creating a situation in which two successors of the
+ current block have different unwind info after scheduling. If at any
+ point the two paths re-join this leads to incorrect unwind info. */
+ /* ??? There are certain situations involving a forced frame pointer in
+ which, with extra effort, we could fix up the unwind info at a later
+ CFG join. However, it seems better to notice these cases earlier
+ during prologue generation and avoid marking the frame pointer setup
+ as frame-related at all. */
+ if (RTX_FRAME_RELATED_P (insn))
+ deps->sched_before_next_jump
+ = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
+
if (code == COND_EXEC)
{
sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
@@ -3302,12 +3314,11 @@ deps_analyze_insn (struct deps_desc *deps, rtx insn)
if (NONDEBUG_INSN_P (insn))
sched_get_condition_with_rev (insn, NULL);
- if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
+ if (JUMP_P (insn))
{
/* Make each JUMP_INSN (but not a speculative check)
a scheduling barrier for memory references. */
if (!deps->readonly
- && JUMP_P (insn)
&& !(sel_sched_p ()
&& sel_insn_is_speculation_check (insn)))
{
@@ -3326,6 +3337,15 @@ deps_analyze_insn (struct deps_desc *deps, rtx insn)
}
}
+ /* For each insn which shouldn't cross a jump, add a dependence. */
+ add_dependence_list_and_free (deps, insn,
+ &deps->sched_before_next_jump, 1,
+ REG_DEP_ANTI);
+
+ sched_analyze_insn (deps, PATTERN (insn), insn);
+ }
+ else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
+ {
sched_analyze_insn (deps, PATTERN (insn), insn);
}
else if (CALL_P (insn))
@@ -3571,6 +3591,7 @@ init_deps (struct deps_desc *deps, bool lazy_reg_last)
deps->last_function_call = 0;
deps->last_function_call_may_noreturn = 0;
deps->sched_before_next_call = 0;
+ deps->sched_before_next_jump = 0;
deps->in_post_call_group_p = not_post_call;
deps->last_debug_insn = 0;
deps->last_reg_pending_barrier = NOT_A_BARRIER;