-rw-r--r--  gcc/ChangeLog      | 17
-rw-r--r--  gcc/doc/tm.texi    |  6
-rw-r--r--  gcc/doc/tm.texi.in |  2
-rw-r--r--  gcc/sched-deps.c   | 53
-rw-r--r--  gcc/sched-int.h    | 13
-rw-r--r--  gcc/target.def     |  7
6 files changed, 94 insertions, 4 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 60f09e4..ecfab77 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,9 +1,24 @@
+2011-05-27 Bernd Schmidt <bernds@codesourcery.com>
+
+ * sched-int.h (struct _haifa_deps_insn_data): New members cond
+ and reverse_cond.
+ (INSN_COND, INSN_REVERSE_COND): New macros.
+ * sched-deps.c (deps_analyze_insn): Call sched_get_condition_with_rev
+ once.
+ (sched_get_condition_with_rev): Cache the results, and look them up
+ if possible.
+ (sched_analyze_insn): Destroy INSN_COND of previous insns if they
+ are clobbered by the current insn.
+ * target.def (exposed_pipeline): New sched data hook.
+ * doc/tm.texi.in (TARGET_SCHED_EXPOSED_PIPELINE): Add hook.
+ * doc/tm.texi: Regenerate.
+
2011-05-27 Bill Schmidt <wschmidt@linux.vnet.ibm.com>
PR tree-optimization/49170
* tree-ssa-math-opts.c (execute_cse_sincos): Add checks for
sincos or cexp.
-
+
2011-05-27 Richard Guenther <rguenther@suse.de>
PR middle-end/49189
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index c2aa216..6526898 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -6794,6 +6794,12 @@ This hook is called by Haifa Scheduler. It performs the operation specified
in its second parameter.
@end deftypefn
+@deftypevr {Target Hook} bool TARGET_SCHED_EXPOSED_PIPELINE
+True if the processor has an exposed pipeline, which means that not just
+the order of instructions is important for correctness when scheduling, but
+also the latencies of operations.
+@end deftypevr
+
@node Sections
@section Dividing the Output into Sections (Texts, Data, @dots{})
@c the above section title is WAY too long. maybe cut the part between
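As a rough sketch of how a backend would opt in to the new hook (a hypothetical <target>.c fragment, not part of this patch; the macro name is the one generated from the DEFHOOKPOD entry added to target.def below):

/* Hypothetical backend fragment: request exposed-pipeline scheduling
   before targetm is built from the hook defaults.  */
#undef TARGET_SCHED_EXPOSED_PIPELINE
#define TARGET_SCHED_EXPOSED_PIPELINE true

struct gcc_target targetm = TARGET_INITIALIZER;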
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index ab0b39b..226e0bf 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -6739,6 +6739,8 @@ This hook is called by Haifa Scheduler. It performs the operation specified
in its second parameter.
@end deftypefn
+@hook TARGET_SCHED_EXPOSED_PIPELINE
+
@node Sections
@section Dividing the Output into Sections (Texts, Data, @dots{})
@c the above section title is WAY too long. maybe cut the part between
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 09bf65d..343d03c 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -488,13 +488,27 @@ deps_may_trap_p (const_rtx mem)
/* Find the condition under which INSN is executed. If REV is not NULL,
it is set to TRUE when the returned comparison should be reversed
- to get the actual condition. */
+ to get the actual condition.
+ We only do actual work the first time we come here for an insn; the
+ results are cached in INSN_COND and INSN_REVERSE_COND. */
static rtx
sched_get_condition_with_rev (const_rtx insn, bool *rev)
{
rtx pat = PATTERN (insn);
rtx src;
+ if (INSN_COND (insn) == const_true_rtx)
+ return NULL_RTX;
+
+ if (INSN_COND (insn) != NULL_RTX)
+ {
+ if (rev)
+ *rev = INSN_REVERSE_COND (insn);
+ return INSN_COND (insn);
+ }
+
+ INSN_COND (insn) = const_true_rtx;
+ INSN_REVERSE_COND (insn) = false;
if (pat == 0)
return 0;
@@ -502,7 +516,10 @@ sched_get_condition_with_rev (const_rtx insn, bool *rev)
*rev = false;
if (GET_CODE (pat) == COND_EXEC)
- return COND_EXEC_TEST (pat);
+ {
+ INSN_COND (insn) = COND_EXEC_TEST (pat);
+ return COND_EXEC_TEST (pat);
+ }
if (!any_condjump_p (insn) || !onlyjump_p (insn))
return 0;
@@ -510,7 +527,10 @@ sched_get_condition_with_rev (const_rtx insn, bool *rev)
src = SET_SRC (pc_set (insn));
if (XEXP (src, 2) == pc_rtx)
- return XEXP (src, 0);
+ {
+ INSN_COND (insn) = XEXP (src, 0);
+ return XEXP (src, 0);
+ }
else if (XEXP (src, 1) == pc_rtx)
{
rtx cond = XEXP (src, 0);
@@ -521,6 +541,8 @@ sched_get_condition_with_rev (const_rtx insn, bool *rev)
if (rev)
*rev = true;
+ INSN_COND (insn) = cond;
+ INSN_REVERSE_COND (insn) = true;
return cond;
}
@@ -2818,6 +2840,8 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
}
else
{
+ regset_head set_or_clobbered;
+
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
{
struct deps_reg *reg_last = &deps->reg_last[i];
@@ -2848,6 +2872,25 @@ sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
}
}
+ if (targetm.sched.exposed_pipeline)
+ {
+ INIT_REG_SET (&set_or_clobbered);
+ bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
+ reg_pending_sets);
+ EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
+ {
+ struct deps_reg *reg_last = &deps->reg_last[i];
+ rtx list;
+ for (list = reg_last->uses; list; list = XEXP (list, 1))
+ {
+ rtx other = XEXP (list, 0);
+ if (INSN_COND (other) != const_true_rtx
+ && refers_to_regno_p (i, i + 1, INSN_COND (other), NULL))
+ INSN_COND (other) = const_true_rtx;
+ }
+ }
+ }
+
/* If the current insn is conditional, we can't free any
of the lists. */
if (sched_has_condition_p (insn))
@@ -3222,6 +3265,10 @@ deps_analyze_insn (struct deps_desc *deps, rtx insn)
if (sched_deps_info->start_insn)
sched_deps_info->start_insn (insn);
+ /* Record the condition for this insn. */
+ if (NONDEBUG_INSN_P (insn))
+ sched_get_condition_with_rev (insn, NULL);
+
if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
{
/* Make each JUMP_INSN (but not a speculative check)
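For reference, the two shapes of conditional insn whose conditions the scheduler now caches look roughly like this (illustrative RTL only; register, mode and label numbers are invented):

/* COND_EXEC insn: INSN_COND caches COND_EXEC_TEST (pat).

     (cond_exec (ne (reg:CC 100) (const_int 0))
                (set (reg:SI 1) (reg:SI 2)))

   Conditional jump: INSN_COND caches XEXP (src, 0) of the pc_set;
   here the else arm is (pc), so the condition is used as-is.

     (set (pc) (if_then_else (eq (reg:CC 100) (const_int 0))
                             (label_ref 42)
                             (pc)))

   If instead the then arm were (pc) and the label sat in the else arm,
   the same condition would be cached with INSN_REVERSE_COND set.  */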
diff --git a/gcc/sched-int.h b/gcc/sched-int.h
index a18f846..f310f8a 100644
--- a/gcc/sched-int.h
+++ b/gcc/sched-int.h
@@ -679,6 +679,17 @@ struct _haifa_deps_insn_data
search in 'forw_deps'. */
deps_list_t resolved_forw_deps;
+ /* If the insn is conditional (either through COND_EXEC, or because
+ it is a conditional branch), this records the condition. NULL
+ for insns that haven't been analyzed yet; const_true_rtx to mark
+ an insn that has no condition, or whose condition has been
+ clobbered by a subsequent insn. */
+ rtx cond;
+
+ /* True if the condition in 'cond' should be reversed to get the actual
+ condition. */
+ unsigned int reverse_cond : 1;
+
/* Some insns (e.g. call) are not allowed to move across blocks. */
unsigned int cant_move : 1;
};
@@ -838,6 +849,8 @@ extern VEC(haifa_deps_insn_data_def, heap) *h_d_i_d;
#define INSN_RESOLVED_FORW_DEPS(INSN) (HDID (INSN)->resolved_forw_deps)
#define INSN_HARD_BACK_DEPS(INSN) (HDID (INSN)->hard_back_deps)
#define INSN_SPEC_BACK_DEPS(INSN) (HDID (INSN)->spec_back_deps)
+#define INSN_COND(INSN) (HDID (INSN)->cond)
+#define INSN_REVERSE_COND(INSN) (HDID (INSN)->reverse_cond)
#define CANT_MOVE(INSN) (HDID (INSN)->cant_move)
#define CANT_MOVE_BY_LUID(LUID) (VEC_index (haifa_deps_insn_data_def, h_d_i_d, \
LUID)->cant_move)
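To make the three states of the new cache concrete, a hypothetical reader (not part of this patch) would interpret the fields like this:

/* Hypothetical helper, for illustration only: how INSN_COND and
   INSN_REVERSE_COND are meant to be read after deps_analyze_insn
   has recorded the condition for an insn.  */
static rtx
peek_cached_condition (const_rtx insn, bool *rev)
{
  rtx cond = INSN_COND (insn);

  if (cond == NULL_RTX)
    /* Not analyzed yet; sched_get_condition_with_rev computes it.  */
    return NULL_RTX;
  if (cond == const_true_rtx)
    /* Unconditional, or the condition was clobbered by a later insn.  */
    return NULL_RTX;
  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return cond;
}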
diff --git a/gcc/target.def b/gcc/target.def
index 6137e97..ea283f3 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -897,6 +897,13 @@ DEFHOOK
bool, (rtx insn, int x),
hook_bool_rtx_int_false)
+DEFHOOKPOD
+(exposed_pipeline,
+"True if the processor has an exposed pipeline, which means that not just\n\
+the order of instructions is important for correctness when scheduling, but\n\
+also the latencies of operations.",
+bool, false)
+
HOOK_VECTOR_END (sched)
/* Functions relating to vectorization. */