author    Richard Henderson <rth@cygnus.com>    2000-04-30 04:01:53 -0700
committer Richard Henderson <rth@gcc.gnu.org>   2000-04-30 04:01:53 -0700
commit    9ec6d7ab0fbf5ed057ad9bce6150aa6b0336b1e3 (patch)
tree      925f6eae61dfe3a5d2f84372d731b4bb1fa58df9 /gcc
parent    927b868f8ce55b4cc88fd093c03d4a301d06fda9 (diff)
ifcvt.c: New file.
* ifcvt.c: New file.
* Makefile.in (OBJS): Add it.
(ifcvt.o): New target.
* jump.c (jump_optimize_1): Remove all code related to if-conversion,
and conditional arithmetic.
(find_insert_position): Remove.
* timevar.def (TV_IFCVT, TV_IFCVT2): New.
* toplev.c (DFI_ce, DFI_ce2): New.
(dump_file): Add ce and ce2 dumps.
(rest_of_compilation): Run if_convert a couple of times.  Set
cse_not_expected after cse2.  Don't set no_new_pseudos until
after sched1 or recompute_reg_usage.

From-SVN: r33547
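For context, the effect of the new pass can be sketched at the source level. The two functions below are illustrative only; they are not part of this commit and the names are invented for the example. The first shows the kind of two-armed assignment the noce_* routines in ifcvt.c recognize, the second a branchless store-flag form of the same computation, in the spirit of noce_try_store_flag_constants.

/* Illustration only; not part of this patch.  A two-armed assignment
   of the form "if (test) x = a; else x = b" that noce_process_if_block
   looks for ...  */
unsigned int
pick (int test, unsigned int a, unsigned int b)
{
  unsigned int x;

  if (test)
    x = a;
  else
    x = b;

  return x;
}

/* ... and a branchless equivalent built from a store-flag value,
   using the identity x = ((a - b) & -(test != 0)) + b.  This is the
   same flavor of rewrite the noce_* routines perform on RTL.
   Unsigned arithmetic keeps the subtraction well defined.  */
unsigned int
pick_branchless (int test, unsigned int a, unsigned int b)
{
  unsigned int mask = -(unsigned int) (test != 0); /* all ones when TEST holds */

  return ((a - b) & mask) + b; /* A if TEST, else B */
}

On a target where branches are costly (a high BRANCH_COST), the second form is what the pass aims to synthesize directly on the RTL.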
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog      15
-rw-r--r--  gcc/Makefile.in     5
-rw-r--r--  gcc/ifcvt.c      2024
-rw-r--r--  gcc/jump.c       1491
-rw-r--r--  gcc/timevar.def     2
-rw-r--r--  gcc/toplev.c      104
6 files changed, 2147 insertions, 1494 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 31ec5f4..312ec98 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,20 @@
2000-04-30 Richard Henderson <rth@cygnus.com>
+ * ifcvt.c: New file.
+ * Makefile.in (OBJS): Add it.
+ (ifcvt.o): New target.
+ * jump.c (jump_optimize_1): Remove all code related to if-conversion,
+ and conditional arithmetic.
+ (find_insert_position): Remove.
+ * timevar.def (TV_IFCVT, TV_IFCVT2): New.
+ * toplev.c (DFI_ce, DFI_ce2): New.
+ (dump_file): Add ce and ce2 dumps.
+ (rest_of_compilation): Run if_convert a couple of times. Set
+ cse_not_expected after cse2. Don't set no_new_pseudos until
+ after sched1 or recompute_reg_usage.
+
+2000-04-30 Richard Henderson <rth@cygnus.com>
+
* config/alpha/t-crtbe (crtbegin.o): Add "-I.".
(crtend.o, crtbeginS.o, crtendS.o): Likewise.
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 7c112a4..f7b8de3 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -684,7 +684,7 @@ OBJS = diagnostic.o \
profile.o insn-attrtab.o $(out_object_file) $(EXTRA_OBJS) convert.o \
mbchar.o dyn-string.o splay-tree.o graph.o sbitmap.o resource.o hash.o \
predict.o lists.o ggc-common.o $(GGC) simplify-rtx.o ssa.o bb-reorder.o \
- sibcall.o conflict.o timevar.o
+ sibcall.o conflict.o timevar.o ifcvt.o
# GEN files are listed separately, so they can be built before doing parallel
# makes for cc1 or cc1plus. Otherwise sequent parallel make attempts to load
@@ -1663,6 +1663,9 @@ timevar.o : timevar.c $(CONFIG_H) system.h $(TIMEVAR_H) flags.h
regrename.o : regrename.c $(CONFIG_H) system.h $(RTL_H) $(TREE_H) flags.h \
insn-config.h $(BASIC_BLOCK_H) $(REGS_H) hard-reg-set.h output.h \
$(RECOG_H) function.h resource.h
+ifcvt.o : ifcvt.c $(CONFIG_H) system.h $(RTL_H) $(REGS_H) \
+ flags.h insn-config.h function.h $(RECOG_H) $(BASIC_BLOCK_H) $(EXPR_H) \
+ output.h
$(out_object_file): $(out_file) $(CONFIG_H) $(TREE_H) $(GGC_H) \
$(RTL_H) $(REGS_H) hard-reg-set.h real.h insn-config.h conditions.h \
insn-flags.h output.h insn-attr.h insn-codes.h system.h toplev.h function.h
diff --git a/gcc/ifcvt.c b/gcc/ifcvt.c
new file mode 100644
index 0000000..33c1918
--- /dev/null
+++ b/gcc/ifcvt.c
@@ -0,0 +1,2024 @@
+/* If-conversion support.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+
+ This file is part of GNU CC.
+
+ GNU CC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ GNU CC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GNU CC; see the file COPYING. If not, write to
+ the Free Software Foundation, 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+
+#include "rtl.h"
+#include "regs.h"
+#include "function.h"
+#include "flags.h"
+#include "insn-config.h"
+#include "recog.h"
+#include "basic-block.h"
+#include "expr.h"
+#include "output.h"
+#include "hard-reg-set.h"
+#include "tm_p.h"
+
+
+#ifndef HAVE_conditional_execution
+#define HAVE_conditional_execution 0
+#endif
+#ifndef HAVE_conditional_move
+#define HAVE_conditional_move 0
+#endif
+#ifndef HAVE_incscc
+#define HAVE_incscc 0
+#endif
+#ifndef HAVE_decscc
+#define HAVE_decscc 0
+#endif
+
+#ifndef MAX_CONDITIONAL_EXECUTE
+#define MAX_CONDITIONAL_EXECUTE (BRANCH_COST + 1)
+#endif
+
+#define EDGE_COMPLEX (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH)
+
+#define NULL_EDGE ((struct edge_def *)NULL)
+#define NULL_BLOCK ((struct basic_block_def *)NULL)
+
+/* # of IF-THEN or IF-THEN-ELSE blocks we looked at */
+static int num_possible_if_blocks;
+
+/* # of IF-THEN or IF-THEN-ELSE blocks that were converted to conditional
+ execution. */
+static int num_updated_if_blocks;
+
+/* # of basic blocks that were removed. */
+static int num_removed_blocks;
+
+/* The post-dominator relation on the original block numbers. */
+static sbitmap *post_dominators;
+
+/* Forward references. */
+static int count_bb_insns PARAMS ((basic_block));
+static rtx first_active_insn PARAMS ((basic_block));
+static int last_active_insn_p PARAMS ((basic_block, rtx));
+
+static int cond_exec_process_insns PARAMS ((rtx, rtx, rtx, int));
+static rtx cond_exec_get_condition PARAMS ((rtx));
+static int cond_exec_process_if_block PARAMS ((basic_block, basic_block,
+ basic_block, basic_block));
+
+static rtx noce_get_condition PARAMS ((rtx, rtx *));
+static int noce_process_if_block PARAMS ((basic_block, basic_block,
+ basic_block, basic_block));
+
+static int process_if_block PARAMS ((basic_block, basic_block,
+ basic_block, basic_block));
+static void merge_if_block PARAMS ((basic_block, basic_block,
+ basic_block, basic_block));
+
+static int find_if_header PARAMS ((basic_block));
+static int find_if_block PARAMS ((basic_block, edge, edge));
+static int find_if_case_1 PARAMS ((basic_block, edge, edge));
+static int find_if_case_2 PARAMS ((basic_block, edge, edge));
+static int find_memory PARAMS ((rtx *, void *));
+static int dead_or_predicable PARAMS ((basic_block, basic_block,
+ basic_block, rtx, int));
+
+/* Abuse the basic_block AUX field to store the original block index,
+ as well as a flag indicating that the block should be rescanned for
+ life analysis. */
+
+#define SET_ORIG_INDEX(BB,I) ((BB)->aux = (void *)((size_t)(I) << 1))
+#define ORIG_INDEX(BB) ((size_t)(BB)->aux >> 1)
+#define SET_UPDATE_LIFE(BB) ((BB)->aux = (void *)((size_t)(BB)->aux | 1))
+#define UPDATE_LIFE(BB) ((size_t)(BB)->aux & 1)
+
+
+/* Count the number of non-jump active insns in BB. */
+
+static int
+count_bb_insns (bb)
+ basic_block bb;
+{
+ int count = 0;
+ rtx insn = bb->head;
+
+ while (1)
+ {
+ if (GET_CODE (insn) == CALL_INSN || GET_CODE (insn) == INSN)
+ count++;
+
+ if (insn == bb->end)
+ break;
+ insn = NEXT_INSN (insn);
+ }
+
+ return count;
+}
+
+/* Return the first non-jump active insn in the basic block. */
+
+static rtx
+first_active_insn (bb)
+ basic_block bb;
+{
+ rtx insn = bb->head;
+
+ if (GET_CODE (insn) == CODE_LABEL)
+ {
+ if (insn == bb->end)
+ return NULL_RTX;
+ insn = NEXT_INSN (insn);
+ }
+
+ while (GET_CODE (insn) == NOTE)
+ {
+ if (insn == bb->end)
+ return NULL_RTX;
+ insn = NEXT_INSN (insn);
+ }
+
+ if (GET_CODE (insn) == JUMP_INSN)
+ return NULL_RTX;
+
+ return insn;
+}
+
+/* Return true if INSN is the last active non-jump insn in BB. */
+
+static int
+last_active_insn_p (bb, insn)
+ basic_block bb;
+ rtx insn;
+{
+ do
+ {
+ if (insn == bb->end)
+ return TRUE;
+ insn = NEXT_INSN (insn);
+ }
+ while (GET_CODE (insn) == NOTE);
+
+ return GET_CODE (insn) == JUMP_INSN;
+}
+
+/* Go through a bunch of insns, converting them to conditional
+ execution format if possible. Return TRUE if all of the non-note
+ insns were processed. */
+
+static int
+cond_exec_process_insns (start, end, test, mod_ok)
+ rtx start; /* first insn to look at */
+ rtx end; /* last insn to look at */
+ rtx test; /* conditional execution test */
+ int mod_ok; /* true if modifications are ok in the last insn. */
+{
+ int must_be_last = FALSE;
+ rtx insn;
+
+ for (insn = start; ; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == NOTE)
+ goto insn_done;
+
+ if (GET_CODE (insn) != INSN && GET_CODE (insn) != CALL_INSN)
+ abort ();
+
+ /* Last insn wasn't last? */
+ if (must_be_last)
+ return FALSE;
+
+ if (modified_in_p (test, insn))
+ {
+ if (!mod_ok)
+ return FALSE;
+ must_be_last = TRUE;
+ }
+
+ /* Now build the conditional form of the instruction. */
+ validate_change (insn, &PATTERN (insn),
+ gen_rtx_COND_EXEC (VOIDmode, copy_rtx (test),
+ PATTERN (insn)), 1);
+
+ insn_done:
+ if (insn == end)
+ break;
+ }
+
+ return TRUE;
+}
+
+/* Return the condition for a jump. Do not do any special processing. */
+
+static rtx
+cond_exec_get_condition (jump)
+ rtx jump;
+{
+ rtx test_if, cond;
+
+ if (condjump_p (jump))
+ test_if = SET_SRC (PATTERN (jump));
+ else if (condjump_in_parallel_p (jump))
+ test_if = SET_SRC (XVECEXP (PATTERN (jump), 0, 0));
+ else
+ return NULL_RTX;
+ cond = XEXP (test_if, 0);
+
+ /* If this branches to JUMP_LABEL when the condition is false,
+ reverse the condition. */
+ if (GET_CODE (XEXP (test_if, 2)) == LABEL_REF
+ && XEXP (XEXP (test_if, 2), 0) == JUMP_LABEL (jump))
+ cond = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
+ GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1));
+
+ return cond;
+}
+
+/* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it
+ to conditional execution. Return TRUE if we were successful at
+ converting the block. */
+
+static int
+cond_exec_process_if_block (test_bb, then_bb, else_bb, join_bb)
+ basic_block test_bb; /* Basic block test is in */
+ basic_block then_bb; /* Basic block for THEN block */
+ basic_block else_bb; /* Basic block for ELSE block */
+ basic_block join_bb; /* Basic block the join label is in */
+{
+ rtx test_expr; /* expression in IF_THEN_ELSE that is tested */
+ rtx then_start; /* first insn in THEN block */
+ rtx then_end; /* last insn in THEN block */
+ rtx else_start; /* first insn in ELSE block or NULL */
+ rtx else_end; /* last insn in ELSE block */
+ int max; /* max # of insns to convert. */
+ int then_mod_ok; /* whether conditional mods are ok in THEN */
+ rtx true_expr; /* test for else block insns */
+ rtx false_expr; /* test for then block insns */
+ int n_insns;
+
+ /* Find the conditional jump to the ELSE or JOIN part, and isolate
+ the test. */
+ test_expr = cond_exec_get_condition (test_bb->end);
+ if (! test_expr)
+ return FALSE;
+
+ /* Collect the bounds of where we're to search. */
+
+ then_start = then_bb->head;
+ then_end = then_bb->end;
+
+ /* Skip a (use (const_int 0)) or branch as the final insn. */
+ if (GET_CODE (then_end) == INSN
+ && GET_CODE (PATTERN (then_end)) == USE
+ && GET_CODE (XEXP (PATTERN (then_end), 0)) == CONST_INT)
+ then_end = PREV_INSN (then_end);
+ else if (GET_CODE (then_end) == JUMP_INSN)
+ then_end = PREV_INSN (then_end);
+
+ if (else_bb)
+ {
+ /* Skip the ELSE block's label. */
+ else_start = NEXT_INSN (else_bb->head);
+ else_end = else_bb->end;
+
+ /* Skip a (use (const_int 0)) or branch as the final insn. */
+ if (GET_CODE (else_end) == INSN
+ && GET_CODE (PATTERN (else_end)) == USE
+ && GET_CODE (XEXP (PATTERN (else_end), 0)) == CONST_INT)
+ else_end = PREV_INSN (else_end);
+ else if (GET_CODE (else_end) == JUMP_INSN)
+ else_end = PREV_INSN (else_end);
+ }
+
+ /* How many instructions should we convert in total? */
+ n_insns = 0;
+ if (else_bb)
+ {
+ max = 2 * MAX_CONDITIONAL_EXECUTE;
+ n_insns = count_bb_insns (else_bb);
+ }
+ else
+ max = MAX_CONDITIONAL_EXECUTE;
+ n_insns += count_bb_insns (then_bb);
+ if (n_insns > max)
+ return FALSE;
+
+ /* Map test_expr/test_jump into the appropriate MD tests to use on
+ the conditionally executed code. */
+
+ true_expr = test_expr;
+ false_expr = gen_rtx_fmt_ee (reverse_condition (GET_CODE (true_expr)),
+ GET_MODE (true_expr), XEXP (true_expr, 0),
+ XEXP (true_expr, 1));
+
+ /* For IF-THEN-ELSE blocks, we don't allow modifications of the test
+ in the THEN block. */
+ then_mod_ok = (else_bb == NULL_BLOCK);
+
+ /* Go through the THEN and ELSE blocks converting the insns if possible
+ to conditional execution. */
+
+ if (then_end
+ && ! cond_exec_process_insns (then_start, then_end,
+ false_expr, then_mod_ok))
+ goto fail;
+
+ if (else_bb
+ && ! cond_exec_process_insns (else_start, else_end,
+ true_expr, TRUE))
+ goto fail;
+
+ if (! apply_change_group ())
+ return FALSE;
+
+ /* Conversion succeeded. */
+ if (rtl_dump_file)
+ fprintf (rtl_dump_file, "%d insn%s converted to conditional execution.\n",
+ n_insns, (n_insns == 1) ? " was" : "s were");
+
+ /* Merge the blocks! */
+ merge_if_block (test_bb, then_bb, else_bb, join_bb);
+ return TRUE;
+
+ fail:
+ cancel_changes (0);
+ return FALSE;
+}
+
+/* Used by noce_process_if_block to communicate with its subroutines.
+
+ The subroutines know that A and B may be evaluated freely. They
+ know that X is a register. They should insert new instructions
+ before cond_earliest. */
+
+struct noce_if_info
+{
+ rtx insn_a, insn_b;
+ rtx x, a, b;
+ rtx jump, cond, cond_earliest;
+};
+
+static rtx noce_emit_store_flag PARAMS ((struct noce_if_info *,
+ rtx, int, int));
+static int noce_try_store_flag PARAMS ((struct noce_if_info *));
+static int noce_try_store_flag_inc PARAMS ((struct noce_if_info *));
+static int noce_try_store_flag_constants PARAMS ((struct noce_if_info *));
+static int noce_try_store_flag_mask PARAMS ((struct noce_if_info *));
+static rtx noce_emit_cmove PARAMS ((struct noce_if_info *,
+ rtx, enum rtx_code, rtx,
+ rtx, rtx, rtx));
+static int noce_try_cmove PARAMS ((struct noce_if_info *));
+static int noce_try_cmove_arith PARAMS ((struct noce_if_info *));
+
+/* Helper function for noce_try_store_flag*. */
+
+static rtx
+noce_emit_store_flag (if_info, x, reversep, normalize)
+ struct noce_if_info *if_info;
+ rtx x;
+ int reversep, normalize;
+{
+ rtx cond = if_info->cond;
+ int cond_complex;
+ enum rtx_code code;
+
+ cond_complex = (! general_operand (XEXP (cond, 0), VOIDmode)
+ || ! general_operand (XEXP (cond, 1), VOIDmode));
+
+ /* If earliest == jump, or when the condition is complex, try to
+ build the store_flag insn directly. */
+
+ if (cond_complex)
+ cond = XEXP (SET_SRC (PATTERN (if_info->jump)), 0);
+
+ if ((if_info->cond_earliest == if_info->jump || cond_complex)
+ && (normalize == 0 || STORE_FLAG_VALUE == normalize))
+ {
+ rtx tmp;
+
+ code = GET_CODE (cond);
+ if (reversep)
+ code = reverse_condition (code);
+
+ tmp = gen_rtx_fmt_ee (code, GET_MODE (x), XEXP (cond, 0),
+ XEXP (cond, 1));
+ tmp = gen_rtx_SET (VOIDmode, x, tmp);
+
+ start_sequence ();
+ tmp = emit_insn (tmp);
+
+ if (recog_memoized (tmp) >= 0)
+ {
+ tmp = get_insns ();
+ end_sequence ();
+ emit_insns (tmp);
+
+ if_info->cond_earliest = if_info->jump;
+
+ return x;
+ }
+
+ end_sequence ();
+ }
+
+ /* Don't even try if the comparison operands are weird. */
+ if (cond_complex)
+ return NULL_RTX;
+
+ code = GET_CODE (cond);
+ if (reversep)
+ code = reverse_condition (code);
+
+ return emit_store_flag (x, code, XEXP (cond, 0),
+ XEXP (cond, 1), VOIDmode,
+ (code == LTU || code == LEU
+ || code == GEU || code == GTU), normalize);
+}
+
+/* Convert "if (test) x = 1; else x = 0".
+
+ Only try 0 and STORE_FLAG_VALUE here. Other combinations will be
+ tried in noce_try_store_flag_constants after noce_try_cmove has had
+ a go at the conversion. */
+
+static int
+noce_try_store_flag (if_info)
+ struct noce_if_info *if_info;
+{
+ int reversep;
+ rtx target, seq;
+
+ if (GET_CODE (if_info->b) == CONST_INT
+ && INTVAL (if_info->b) == STORE_FLAG_VALUE
+ && if_info->a == const0_rtx)
+ reversep = 0;
+ else if (if_info->b == const0_rtx
+ && GET_CODE (if_info->a) == CONST_INT
+ && INTVAL (if_info->a) == STORE_FLAG_VALUE
+ && can_reverse_comparison_p (if_info->cond, if_info->jump))
+ reversep = 1;
+ else
+ return FALSE;
+
+ start_sequence ();
+
+ target = noce_emit_store_flag (if_info, if_info->x, reversep, 0);
+ if (target)
+ {
+ if (target != if_info->x)
+ emit_move_insn (if_info->x, target);
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, if_info->cond_earliest);
+
+ return TRUE;
+ }
+ else
+ {
+ end_sequence ();
+ return FALSE;
+ }
+}
+
+/* Convert "if (test) x = a; else x = b", for A and B constant. */
+
+static int
+noce_try_store_flag_constants (if_info)
+ struct noce_if_info *if_info;
+{
+ rtx target, seq;
+ int reversep;
+ HOST_WIDE_INT itrue, ifalse, diff, tmp;
+ int normalize, can_reverse;
+
+ if (! no_new_pseudos
+ && GET_CODE (if_info->a) == CONST_INT
+ && GET_CODE (if_info->b) == CONST_INT)
+ {
+ ifalse = INTVAL (if_info->a);
+ itrue = INTVAL (if_info->b);
+ diff = itrue - ifalse;
+
+ can_reverse = can_reverse_comparison_p (if_info->cond, if_info->jump);
+
+ reversep = 0;
+ if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
+ normalize = 0;
+ else if (ifalse == 0 && exact_log2 (itrue) >= 0
+ && (STORE_FLAG_VALUE == 1
+ || BRANCH_COST >= 2))
+ normalize = 1;
+ else if (itrue == 0 && exact_log2 (ifalse) >= 0 && can_reverse
+ && (STORE_FLAG_VALUE == 1 || BRANCH_COST >= 2))
+ normalize = 1, reversep = 1;
+ else if (itrue == -1
+ && (STORE_FLAG_VALUE == -1
+ || BRANCH_COST >= 2))
+ normalize = -1;
+ else if (ifalse == -1 && can_reverse
+ && (STORE_FLAG_VALUE == -1 || BRANCH_COST >= 2))
+ normalize = -1, reversep = 1;
+ else if ((BRANCH_COST >= 2 && STORE_FLAG_VALUE == -1)
+ || BRANCH_COST >= 3)
+ normalize = -1;
+ else
+ return FALSE;
+
+ if (reversep)
+ {
+ tmp = itrue; itrue = ifalse; ifalse = tmp;
+ diff = -diff;
+ }
+
+ start_sequence ();
+ target = noce_emit_store_flag (if_info, if_info->x, reversep, normalize);
+ if (! target)
+ {
+ end_sequence ();
+ return FALSE;
+ }
+
+ /* if (test) x = 3; else x = 4;
+ => x = 3 + (test == 0); */
+ if (diff == STORE_FLAG_VALUE || diff == -STORE_FLAG_VALUE)
+ {
+ target = expand_binop (GET_MODE (if_info->x),
+ (diff == STORE_FLAG_VALUE
+ ? add_optab : sub_optab),
+ GEN_INT (ifalse), target, if_info->x, 0,
+ OPTAB_WIDEN);
+ }
+
+ /* if (test) x = 8; else x = 0;
+ => x = (test != 0) << 3; */
+ else if (ifalse == 0 && (tmp = exact_log2 (itrue)) >= 0)
+ {
+ target = expand_binop (GET_MODE (if_info->x), ashl_optab,
+ target, GEN_INT (tmp), if_info->x, 0,
+ OPTAB_WIDEN);
+ }
+
+ /* if (test) x = -1; else x = b;
+ => x = -(test != 0) | b; */
+ else if (itrue == -1)
+ {
+ target = expand_binop (GET_MODE (if_info->x), ior_optab,
+ target, GEN_INT (ifalse), if_info->x, 0,
+ OPTAB_WIDEN);
+ }
+
+ /* if (test) x = a; else x = b;
+ => x = (-(test != 0) & (b - a)) + a; */
+ else
+ {
+ target = expand_binop (GET_MODE (if_info->x), and_optab,
+ target, GEN_INT (diff), if_info->x, 0,
+ OPTAB_WIDEN);
+ if (target)
+ target = expand_binop (GET_MODE (if_info->x), add_optab,
+ target, GEN_INT (ifalse), if_info->x, 0,
+ OPTAB_WIDEN);
+ }
+
+ if (! target)
+ {
+ end_sequence ();
+ return FALSE;
+ }
+
+ if (target != if_info->x)
+ emit_move_insn (if_info->x, target);
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, if_info->cond_earliest);
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/* Convert "if (test) foo++" into "foo += (test != 0)", and
+ similarly for "foo--". */
+
+static int
+noce_try_store_flag_inc (if_info)
+ struct noce_if_info *if_info;
+{
+ rtx target, seq;
+ int subtract, normalize;
+
+ if (! no_new_pseudos
+ && (BRANCH_COST >= 2
+ || HAVE_incscc
+ || HAVE_decscc)
+ /* Should be no `else' case to worry about. */
+ && if_info->b == if_info->x
+ && GET_CODE (if_info->a) == PLUS
+ && (XEXP (if_info->a, 1) == const1_rtx
+ || XEXP (if_info->a, 1) == constm1_rtx)
+ && rtx_equal_p (XEXP (if_info->a, 0), if_info->x)
+ && can_reverse_comparison_p (if_info->cond, if_info->jump))
+ {
+ if (STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
+ subtract = 0, normalize = 0;
+ else if (-STORE_FLAG_VALUE == INTVAL (XEXP (if_info->a, 1)))
+ subtract = 1, normalize = 0;
+ else
+ subtract = 0, normalize = INTVAL (XEXP (if_info->a, 1));
+
+ start_sequence ();
+
+ target = noce_emit_store_flag (if_info,
+ gen_reg_rtx (GET_MODE (if_info->x)),
+ 1, normalize);
+
+ if (target)
+ target = expand_binop (GET_MODE (if_info->x),
+ subtract ? sub_optab : add_optab,
+ if_info->x, target, if_info->x, 0, OPTAB_WIDEN);
+ if (target)
+ {
+ if (target != if_info->x)
+ emit_move_insn (if_info->x, target);
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, if_info->cond_earliest);
+
+ return TRUE;
+ }
+
+ end_sequence ();
+ }
+
+ return FALSE;
+}
+
+/* Convert "if (test) x = 0;" to "x &= -(test == 0);" */
+
+static int
+noce_try_store_flag_mask (if_info)
+ struct noce_if_info *if_info;
+{
+ rtx target, seq;
+ int reversep;
+
+ reversep = 0;
+ if (! no_new_pseudos
+ && (BRANCH_COST >= 2
+ || STORE_FLAG_VALUE == -1)
+ && ((if_info->a == const0_rtx
+ && rtx_equal_p (if_info->b, if_info->x))
+ || ((reversep = can_reverse_comparison_p (if_info->cond,
+ if_info->jump))
+ && if_info->b == const0_rtx
+ && rtx_equal_p (if_info->a, if_info->x))))
+ {
+ start_sequence ();
+ target = noce_emit_store_flag (if_info,
+ gen_reg_rtx (GET_MODE (if_info->x)),
+ reversep, -1);
+ if (target)
+ target = expand_binop (GET_MODE (if_info->x), and_optab,
+ if_info->x, target, if_info->x, 0,
+ OPTAB_WIDEN);
+
+ if (target)
+ {
+ if (target != if_info->x)
+ emit_move_insn (if_info->x, target);
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, if_info->cond_earliest);
+
+ return TRUE;
+ }
+
+ end_sequence ();
+ }
+
+ return FALSE;
+}
+
+/* Helper function for noce_try_cmove and noce_try_cmove_arith. */
+
+static rtx
+noce_emit_cmove (if_info, x, code, cmp_a, cmp_b, vfalse, vtrue)
+ struct noce_if_info *if_info;
+ rtx x, cmp_a, cmp_b, vfalse, vtrue;
+ enum rtx_code code;
+{
+ /* If earliest == jump, try to build the cmove insn directly.
+ This is helpful when combine has created some complex condition
+ (like for alpha's cmovlbs) that we can't hope to regenerate
+ through the normal interface. */
+
+ if (if_info->cond_earliest == if_info->jump)
+ {
+ rtx tmp;
+
+ tmp = gen_rtx_fmt_ee (code, GET_MODE (if_info->cond), cmp_a, cmp_b);
+ tmp = gen_rtx_IF_THEN_ELSE (GET_MODE (x), tmp, vtrue, vfalse);
+ tmp = gen_rtx_SET (VOIDmode, x, tmp);
+
+ start_sequence ();
+ tmp = emit_insn (tmp);
+
+ if (recog_memoized (tmp) >= 0)
+ {
+ tmp = get_insns ();
+ end_sequence ();
+ emit_insns (tmp);
+
+ return x;
+ }
+
+ end_sequence ();
+ }
+
+ /* Don't even try if the comparison operands are weird. */
+ if (! general_operand (cmp_a, GET_MODE (cmp_a))
+ || ! general_operand (cmp_b, GET_MODE (cmp_b)))
+ return NULL_RTX;
+
+ return emit_conditional_move (x, code, cmp_a, cmp_b, VOIDmode,
+ vtrue, vfalse, GET_MODE (x),
+ (code == LTU || code == GEU
+ || code == LEU || code == GTU));
+}
+
+/* Try only simple constants and registers here. More complex cases
+ are handled in noce_try_cmove_arith after noce_try_store_flag_arith
+ has had a go at it. */
+
+static int
+noce_try_cmove (if_info)
+ struct noce_if_info *if_info;
+{
+ enum rtx_code code;
+ rtx target, seq;
+
+ if ((CONSTANT_P (if_info->a) || register_operand (if_info->a, VOIDmode))
+ && (CONSTANT_P (if_info->b) || register_operand (if_info->b, VOIDmode)))
+ {
+ start_sequence ();
+
+ code = GET_CODE (if_info->cond);
+ target = noce_emit_cmove (if_info, if_info->x, code,
+ XEXP (if_info->cond, 0),
+ XEXP (if_info->cond, 1),
+ if_info->a, if_info->b);
+
+ if (target)
+ {
+ if (target != if_info->x)
+ emit_move_insn (if_info->x, target);
+
+ seq = get_insns ();
+ end_sequence ();
+ emit_insns_before (seq, if_info->cond_earliest);
+ return TRUE;
+ }
+ else
+ {
+ end_sequence ();
+ return FALSE;
+ }
+ }
+
+ return FALSE;
+}
+
+/* Try more complex cases involving conditional_move. */
+
+static int
+noce_try_cmove_arith (if_info)
+ struct noce_if_info *if_info;
+{
+ rtx a = if_info->a;
+ rtx b = if_info->b;
+ rtx x = if_info->x;
+ rtx insn_a, insn_b;
+ rtx tmp, target;
+ int is_mem = 0;
+ enum rtx_code code;
+
+ /* A conditional move from two memory sources is equivalent to a
+ conditional on their addresses followed by a load. Don't do this
+ early because it'll screw up alias analysis. Note that we've
+ already checked for no side effects. */
+ if (! no_new_pseudos && cse_not_expected
+ && GET_CODE (a) == MEM && GET_CODE (b) == MEM
+ && BRANCH_COST >= 5)
+ {
+ a = XEXP (a, 0);
+ b = XEXP (b, 0);
+ x = gen_reg_rtx (Pmode);
+ is_mem = 1;
+ }
+
+ /* ??? We could handle this if we knew that a load from A or B could
+ not fault. This is true of stack memories or if we've already loaded
+ from the address along the path from ENTRY. */
+ else if (GET_CODE (a) == MEM || GET_CODE (b) == MEM)
+ return FALSE;
+
+ /* if (test) x = a + b; else x = c - d;
+ => y = a + b;
+ x = c - d;
+ if (test)
+ x = y;
+ */
+
+ code = GET_CODE (if_info->cond);
+ insn_a = if_info->insn_a;
+ insn_b = if_info->insn_b;
+
+ /* Possibly rearrange operands to make things come out more naturally. */
+ if (can_reverse_comparison_p (if_info->cond, if_info->jump))
+ {
+ int reversep = 0;
+ if (rtx_equal_p (b, x))
+ reversep = 1;
+ else if (general_operand (b, GET_MODE (b)))
+ reversep = 1;
+
+ if (reversep)
+ {
+ code = reverse_condition (code);
+ tmp = a, a = b, b = tmp;
+ tmp = insn_a, insn_a = insn_b, insn_b = tmp;
+ }
+ }
+
+ start_sequence ();
+
+ /* If either operand is complex, load it into a register first.
+ The best way to do this is to copy the original insn. In this
+ way we preserve any clobbers etc that the insn may have had.
+ This is of course not possible in the IS_MEM case. */
+ if (! general_operand (a, GET_MODE (a)))
+ {
+ rtx set;
+
+ if (no_new_pseudos)
+ goto end_seq_and_fail;
+
+ if (is_mem)
+ {
+ tmp = gen_reg_rtx (GET_MODE (a));
+ tmp = emit_insn (gen_rtx_SET (VOIDmode, tmp, a));
+ }
+ else if (! insn_a)
+ goto end_seq_and_fail;
+ else
+ {
+ a = gen_reg_rtx (GET_MODE (a));
+ tmp = copy_rtx (insn_a);
+ set = single_set (tmp);
+ SET_DEST (set) = a;
+ tmp = emit_insn (PATTERN (tmp));
+ }
+ if (recog_memoized (tmp) < 0)
+ goto end_seq_and_fail;
+ }
+ if (! general_operand (b, GET_MODE (b)))
+ {
+ rtx set;
+
+ if (no_new_pseudos)
+ goto end_seq_and_fail;
+
+ if (is_mem)
+ {
+ tmp = gen_reg_rtx (GET_MODE (b));
+ tmp = emit_insn (gen_rtx_SET (VOIDmode, tmp, b));
+ }
+ else if (! insn_b)
+ goto end_seq_and_fail;
+ else
+ {
+ b = gen_reg_rtx (GET_MODE (b));
+ tmp = copy_rtx (insn_b);
+ set = single_set (tmp);
+ SET_DEST (set) = b;
+ tmp = emit_insn (PATTERN (tmp));
+ }
+ if (recog_memoized (tmp) < 0)
+ goto end_seq_and_fail;
+ }
+
+ target = noce_emit_cmove (if_info, x, code, XEXP (if_info->cond, 0),
+ XEXP (if_info->cond, 1), a, b);
+
+ if (! target)
+ goto end_seq_and_fail;
+
+ /* If we're handling the memory case from above, emit the load now. */
+ if (is_mem)
+ {
+ tmp = gen_rtx_MEM (GET_MODE (if_info->x), target);
+
+ /* Copy over flags as appropriate. */
+ if (MEM_VOLATILE_P (if_info->a) || MEM_VOLATILE_P (if_info->b))
+ MEM_VOLATILE_P (tmp) = 1;
+ if (MEM_IN_STRUCT_P (if_info->a) && MEM_IN_STRUCT_P (if_info->b))
+ MEM_IN_STRUCT_P (tmp) = 1;
+ if (MEM_SCALAR_P (if_info->a) && MEM_SCALAR_P (if_info->b))
+ MEM_SCALAR_P (tmp) = 1;
+ if (MEM_ALIAS_SET (if_info->a) == MEM_ALIAS_SET (if_info->b))
+ MEM_ALIAS_SET (tmp) = MEM_ALIAS_SET (if_info->a);
+
+ emit_move_insn (if_info->x, tmp);
+ }
+ else if (target != x)
+ emit_move_insn (x, target);
+
+ tmp = get_insns ();
+ end_sequence ();
+ emit_insns_before (tmp, if_info->cond_earliest);
+ return TRUE;
+
+ end_seq_and_fail:
+ end_sequence ();
+ return FALSE;
+}
+
+/* Look for the condition for the jump first. We'd prefer to avoid
+ get_condition if we can -- it tries to look back for the contents
+ of an original compare. On targets that use normal integers for
+ comparisons, e.g. alpha, this is wasteful. */
+
+static rtx
+noce_get_condition (jump, earliest)
+ rtx jump;
+ rtx *earliest;
+{
+ rtx cond;
+
+ /* If the condition variable is a register and is MODE_INT, accept it.
+ Otherwise, fall back on get_condition. */
+
+ if (! condjump_p (jump))
+ return NULL_RTX;
+
+ cond = XEXP (SET_SRC (PATTERN (jump)), 0);
+ if (GET_CODE (XEXP (cond, 0)) == REG
+ && GET_MODE_CLASS (GET_MODE (XEXP (cond, 0))) == MODE_INT)
+ {
+ *earliest = jump;
+
+ /* If this branches to JUMP_LABEL when the condition is false,
+ reverse the condition. */
+ if (GET_CODE (XEXP (SET_SRC (PATTERN (jump)), 2)) == LABEL_REF
+ && XEXP (XEXP (SET_SRC (PATTERN (jump)), 2), 0) == JUMP_LABEL (jump))
+ cond = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
+ GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1));
+ }
+ else
+ cond = get_condition (jump, earliest);
+
+ return cond;
+}
+
+/* Given a simple IF-THEN or IF-THEN-ELSE block, attempt to convert it
+ without using conditional execution. Return TRUE if we were
+ successful at converting the block. */
+
+static int
+noce_process_if_block (test_bb, then_bb, else_bb, join_bb)
+ basic_block test_bb; /* Basic block test is in */
+ basic_block then_bb; /* Basic block for THEN block */
+ basic_block else_bb; /* Basic block for ELSE block */
+ basic_block join_bb; /* Basic block the join label is in */
+{
+ /* We're looking for patterns of the form
+
+ (1) if (...) x = a; else x = b;
+ (2) x = b; if (...) x = a;
+ (3) if (...) x = a; // as if with an initial x = x.
+
+ The latter patterns require jumps to be more expensive.
+
+ ??? For future expansion, look for multiple X in such patterns. */
+
+ struct noce_if_info if_info;
+ rtx insn_a, insn_b;
+ rtx set_a, set_b;
+ rtx orig_x, x, a, b;
+ rtx jump, cond;
+
+ /* If this is not a standard conditional jump, we can't parse it. */
+ jump = test_bb->end;
+ cond = noce_get_condition (jump, &if_info.cond_earliest);
+ if (! cond)
+ return FALSE;
+
+ /* We must be comparing objects whose modes imply the size. */
+ if (GET_MODE (XEXP (cond, 0)) == BLKmode)
+ return FALSE;
+
+ /* Look for one of the potential sets. */
+ insn_a = first_active_insn (then_bb);
+ if (! insn_a
+ || ! last_active_insn_p (then_bb, insn_a)
+ || (set_a = single_set (insn_a)) == NULL_RTX)
+ return FALSE;
+
+ x = SET_DEST (set_a);
+ a = SET_SRC (set_a);
+
+ /* Look for the other potential set. Make sure we've got equivalent
+ destinations. */
+ /* ??? This is overconservative. Storing to two different mems is
+ as easy as conditionally computing the address. Storing to a
+ single mem merely requires a scratch memory to use as one of the
+ destination addresses; often the memory immediately below the
+ stack pointer is available for this. */
+ set_b = NULL_RTX;
+ if (else_bb)
+ {
+ insn_b = first_active_insn (else_bb);
+ if (! insn_b
+ || ! last_active_insn_p (else_bb, insn_b)
+ || (set_b = single_set (insn_b)) == NULL_RTX
+ || ! rtx_equal_p (x, SET_DEST (set_b)))
+ return FALSE;
+ }
+ else
+ {
+ insn_b = prev_nonnote_insn (if_info.cond_earliest);
+ if (! insn_b
+ || GET_CODE (insn_b) != INSN
+ || (set_b = single_set (insn_b)) == NULL_RTX
+ || ! rtx_equal_p (x, SET_DEST (set_b))
+ || reg_mentioned_p (x, cond))
+ insn_b = set_b = NULL_RTX;
+ }
+ b = (set_b ? SET_SRC (set_b) : x);
+
+ /* Only operate on register destinations, and even then avoid extending
+ the lifetime of hard registers on small register class machines. */
+ orig_x = x;
+ if (GET_CODE (x) != REG
+ || (SMALL_REGISTER_CLASSES
+ && REGNO (x) < FIRST_PSEUDO_REGISTER))
+ {
+ if (no_new_pseudos)
+ return FALSE;
+ x = gen_reg_rtx (GET_MODE (x));
+ }
+
+ /* Don't operate on sources that may trap or are volatile. */
+ if (side_effects_p (a) || side_effects_p (b)
+ || (GET_CODE (a) != MEM && may_trap_p (a))
+ || (GET_CODE (b) != MEM && may_trap_p (b)))
+ return FALSE;
+
+ /* Set up the info block for our subroutines. */
+ if_info.cond = cond;
+ if_info.jump = jump;
+ if_info.insn_a = insn_a;
+ if_info.insn_b = insn_b;
+ if_info.x = x;
+ if_info.a = a;
+ if_info.b = b;
+
+ /* Try optimizations in some approximation of a useful order. */
+ /* ??? Should first look to see if X is live incoming at all. If it
+ isn't, we don't need anything but an unconditional set. */
+
+ /* Look and see if A and B are really the same. Avoid creating silly
+ cmove constructs that no one will fix up later. */
+ if (rtx_equal_p (a, b))
+ {
+ /* If we have an INSN_B, we don't have to create any new rtl. Just
+ move the instruction that we already have. If we don't have an
+ INSN_B, that means that A == X, and we've got a noop move. In
+ that case don't do anything and let the code below delete INSN_A. */
+ if (insn_b && else_bb)
+ {
+ if (else_bb && insn_b == else_bb->end)
+ else_bb->end = PREV_INSN (insn_b);
+ reorder_insns (insn_b, insn_b, PREV_INSN (if_info.cond_earliest));
+ insn_b = NULL_RTX;
+ x = orig_x;
+ }
+ goto success;
+ }
+
+ if (noce_try_store_flag (&if_info))
+ goto success;
+ if (HAVE_conditional_move
+ && noce_try_cmove (&if_info))
+ goto success;
+ if (! HAVE_conditional_execution)
+ {
+ if (noce_try_store_flag_constants (&if_info))
+ goto success;
+ if (noce_try_store_flag_inc (&if_info))
+ goto success;
+ if (noce_try_store_flag_mask (&if_info))
+ goto success;
+ if (HAVE_conditional_move
+ && noce_try_cmove_arith (&if_info))
+ goto success;
+ }
+
+ return FALSE;
+
+ success:
+ /* The original sets may now be killed. */
+ if (insn_a == then_bb->end)
+ then_bb->end = PREV_INSN (insn_a);
+ flow_delete_insn (insn_a);
+
+ /* Several special cases here: First, we may have reused insn_b above,
+ in which case insn_b is now NULL. Second, we want to delete insn_b
+ if it came from the ELSE block, because it follows the now-correct
+ write that appears in the TEST block. However, if we got insn_b from
+ the TEST block, it may in fact be loading data needed for the comparison.
+ We'll let life_analysis remove the insn if it's really dead. */
+ if (insn_b && else_bb)
+ {
+ if (insn_b == else_bb->end)
+ else_bb->end = PREV_INSN (insn_b);
+ flow_delete_insn (insn_b);
+ }
+
+ /* The new insns will have been inserted before cond_earliest. We should
+ be able to remove cond_earliest through the jump with impunity. */
+ insn_a = prev_nonnote_insn (if_info.cond_earliest);
+ flow_delete_insn_chain (if_info.cond_earliest, test_bb->end);
+ test_bb->end = insn_a;
+
+ /* If we used a temporary, fix it up now. */
+ if (orig_x != x)
+ {
+ start_sequence ();
+ emit_move_insn (orig_x, x);
+ insn_b = gen_sequence ();
+ end_sequence ();
+
+ test_bb->end = emit_insn_after (insn_b, insn_a);
+ }
+
+ /* Merge the blocks! */
+ merge_if_block (test_bb, then_bb, else_bb, join_bb);
+
+ return TRUE;
+}
+
+/* Attempt to convert an IF-THEN or IF-THEN-ELSE block into
+ straight line code. Return true if successful. */
+
+static int
+process_if_block (test_bb, then_bb, else_bb, join_bb)
+ basic_block test_bb; /* Basic block test is in */
+ basic_block then_bb; /* Basic block for THEN block */
+ basic_block else_bb; /* Basic block for ELSE block */
+ basic_block join_bb; /* Basic block the join label is in */
+{
+ if (! reload_completed
+ && noce_process_if_block (test_bb, then_bb, else_bb, join_bb))
+ return TRUE;
+
+ if (HAVE_conditional_execution
+ && reload_completed
+ && cond_exec_process_if_block (test_bb, then_bb, else_bb, join_bb))
+ return TRUE;
+
+ return FALSE;
+}
+
+/* Merge the blocks and mark for local life update. */
+
+static void
+merge_if_block (test_bb, then_bb, else_bb, join_bb)
+ basic_block test_bb; /* Basic block test is in */
+ basic_block then_bb; /* Basic block for THEN block */
+ basic_block else_bb; /* Basic block for ELSE block */
+ basic_block join_bb; /* Basic block the join label is in */
+{
+ basic_block combo_bb;
+
+ /* All block merging is done into the lower block numbers. */
+
+ combo_bb = test_bb;
+
+ /* First merge TEST block into THEN block. This is a no-brainer since
+ the THEN block did not have a code label to begin with. */
+
+ if (combo_bb->global_live_at_end)
+ COPY_REG_SET (combo_bb->global_live_at_end, then_bb->global_live_at_end);
+ merge_blocks_nomove (combo_bb, then_bb);
+ num_removed_blocks++;
+
+ /* The ELSE block, if it existed, had a label. That label count
+ will almost always be zero, but odd things can happen when labels
+ get their addresses taken. */
+ if (else_bb)
+ {
+ if (LABEL_NUSES (else_bb->head) == 0
+ && ! LABEL_PRESERVE_P (else_bb->head)
+ && ! LABEL_NAME (else_bb->head))
+ {
+ /* We can merge the ELSE. */
+ merge_blocks_nomove (combo_bb, else_bb);
+ num_removed_blocks++;
+ }
+ else
+ {
+ /* We cannot merge the ELSE. */
+
+ /* Properly rewire the edge out of the now combined
+ TEST-THEN block to point here. */
+ remove_edge (combo_bb->succ);
+ if (combo_bb->succ || else_bb->pred)
+ abort ();
+ make_edge (NULL, combo_bb, else_bb, EDGE_FALLTHRU);
+
+ /* Remove the jump and cruft from the end of the TEST-THEN block. */
+ tidy_fallthru_edge (combo_bb->succ, combo_bb, else_bb);
+
+ /* Make sure we update life info properly. */
+ SET_UPDATE_LIFE(combo_bb);
+ if (else_bb->global_live_at_end)
+ COPY_REG_SET (else_bb->global_live_at_start,
+ else_bb->global_live_at_end);
+
+ /* The ELSE is the new combo block. */
+ combo_bb = else_bb;
+ }
+ }
+
+ /* If there was no join block reported, that means it was not adjacent
+ to the others, and so we cannot merge them. */
+
+ if (! join_bb)
+ {
+ /* The outgoing edge for the current COMBO block should already
+ be correct. Verify this. */
+ if (combo_bb->succ == NULL_EDGE)
+ abort ();
+
+ /* There should still be a branch at the end of the THEN or ELSE
+ blocks taking us to our final destination. */
+ if (! simplejump_p (combo_bb->end)
+ && ! returnjump_p (combo_bb->end))
+ abort ();
+ }
+
+ /* The JOIN block had a label. It may have had quite a number
+ of other predecessors too, but probably not. See if we can
+ merge this with the others. */
+ else if (LABEL_NUSES (join_bb->head) == 0
+ && ! LABEL_PRESERVE_P (join_bb->head)
+ && ! LABEL_NAME (join_bb->head))
+ {
+ /* We can merge the JOIN. */
+ if (combo_bb->global_live_at_end)
+ COPY_REG_SET (combo_bb->global_live_at_end,
+ join_bb->global_live_at_end);
+ merge_blocks_nomove (combo_bb, join_bb);
+ num_removed_blocks++;
+ }
+ else
+ {
+ /* We cannot merge the JOIN. */
+
+ /* The outgoing edge for the current COMBO block should already
+ be correct. Verify this. */
+ if (combo_bb->succ->succ_next != NULL_EDGE
+ || combo_bb->succ->dest != join_bb)
+ abort ();
+
+ /* Remove the jump and cruft from the end of the COMBO block. */
+ tidy_fallthru_edge (combo_bb->succ, combo_bb, join_bb);
+ }
+
+ /* Make sure we update life info properly. */
+ SET_UPDATE_LIFE (combo_bb);
+
+ num_updated_if_blocks++;
+}
+
+/* Find a block ending in a simple IF condition. Return TRUE if
+ we were able to transform it in some way. */
+
+static int
+find_if_header (test_bb)
+ basic_block test_bb;
+{
+ edge then_edge;
+ edge else_edge;
+
+ /* The kind of block we're looking for has exactly two successors. */
+ if ((then_edge = test_bb->succ) == NULL_EDGE
+ || (else_edge = then_edge->succ_next) == NULL_EDGE
+ || else_edge->succ_next != NULL_EDGE)
+ return FALSE;
+
+ /* Neither edge should be abnormal. */
+ if ((then_edge->flags & EDGE_COMPLEX)
+ || (else_edge->flags & EDGE_COMPLEX))
+ return FALSE;
+
+ /* The THEN edge is canonically the one that falls through. */
+ if (then_edge->flags & EDGE_FALLTHRU)
+ ;
+ else if (else_edge->flags & EDGE_FALLTHRU)
+ {
+ edge e = else_edge;
+ else_edge = then_edge;
+ then_edge = e;
+ }
+ else
+ /* Otherwise this must be a multiway branch of some sort. */
+ return FALSE;
+
+ if (find_if_block (test_bb, then_edge, else_edge))
+ goto success;
+ if (post_dominators
+ && (! HAVE_conditional_execution || reload_completed))
+ {
+ if (find_if_case_1 (test_bb, then_edge, else_edge))
+ goto success;
+ if (find_if_case_2 (test_bb, then_edge, else_edge))
+ goto success;
+ }
+
+ return FALSE;
+
+ success:
+ if (rtl_dump_file)
+ fprintf (rtl_dump_file, "Conversion succeeded.\n");
+ return TRUE;
+}
+
+/* Determine if a given basic block heads a simple IF-THEN or IF-THEN-ELSE
+ block. If so, we'll try to convert the insns to not require the branch.
+ Return TRUE if we were successful at converting the block. */
+
+static int
+find_if_block (test_bb, then_edge, else_edge)
+ basic_block test_bb;
+ edge then_edge, else_edge;
+{
+ basic_block then_bb = then_edge->dest;
+ basic_block else_bb = else_edge->dest;
+ basic_block join_bb = NULL_BLOCK;
+ edge then_succ = then_bb->succ;
+ edge else_succ = else_bb->succ;
+ int next_index;
+
+ /* The THEN block of an IF-THEN combo must have exactly one predecessor. */
+ if (then_bb->pred->pred_next != NULL_EDGE)
+ return FALSE;
+
+ /* The THEN block of an IF-THEN combo must have exactly one successor. */
+ if (then_succ == NULL_EDGE
+ || then_succ->succ_next != NULL_EDGE
+ || (then_succ->flags & EDGE_COMPLEX))
+ return FALSE;
+
+ /* The THEN block may not start with a label, as might happen with an
+ unused user label that has had its address taken. */
+ if (GET_CODE (then_bb->head) == CODE_LABEL)
+ return FALSE;
+
+ /* If the THEN block's successor is the other edge out of the TEST block,
+ then we have an IF-THEN combo without an ELSE. */
+ if (then_succ->dest == else_bb)
+ {
+ join_bb = else_bb;
+ else_bb = NULL_BLOCK;
+ }
+
+ /* If the THEN and ELSE block meet in a subsequent block, and the ELSE
+ has exactly one predecessor and one successor, and the outgoing edge
+ is not complex, then we have an IF-THEN-ELSE combo. */
+ else if (else_succ != NULL_EDGE
+ && then_succ->dest == else_succ->dest
+ && else_bb->pred->pred_next == NULL_EDGE
+ && else_succ->succ_next == NULL_EDGE
+ && ! (else_succ->flags & EDGE_COMPLEX))
+ join_bb = else_succ->dest;
+
+ /* Otherwise it is not an IF-THEN or IF-THEN-ELSE combination. */
+ else
+ return FALSE;
+
+ num_possible_if_blocks++;
+
+ if (rtl_dump_file)
+ {
+ if (else_bb)
+ fprintf (rtl_dump_file,
+ "\nIF-THEN-ELSE block found, start %d, then %d, else %d, join %d\n",
+ test_bb->index, then_bb->index, else_bb->index,
+ join_bb->index);
+ else
+ fprintf (rtl_dump_file,
+ "\nIF-THEN block found, start %d, then %d, join %d\n",
+ test_bb->index, then_bb->index, join_bb->index);
+ }
+
+ /* Make sure the IF, THEN, and ELSE blocks are adjacent. Actually, we
+ get the first condition for free, since we've already asserted that
+ there's a fallthru edge from IF to THEN. */
+ /* ??? As an enhancement, move the ELSE block. Have to deal with EH and
+ BLOCK notes, if by no other means than aborting the merge if they
+ exist. Sticky enough I don't want to think about it now. */
+ next_index = then_bb->index;
+ if (else_bb && ++next_index != else_bb->index)
+ return FALSE;
+ if (++next_index != join_bb->index)
+ {
+ if (else_bb)
+ join_bb = NULL;
+ else
+ return FALSE;
+ }
+
+ /* Do the real work. */
+ return process_if_block (test_bb, then_bb, else_bb, join_bb);
+}
+
+/* Look for IF-THEN-ELSE cases in which one of THEN or ELSE is
+ transformable, but not necessarily the other. There need be no
+ JOIN block.
+
+ Return TRUE if we were successful at converting the block.
+
+ Cases we'd like to look at:
+
+ (1)
+ if (test) goto over; // x not live
+ x = a;
+ goto label;
+ over:
+
+ becomes
+
+ x = a;
+ if (! test) goto label;
+
+ (2)
+ if (test) goto E; // x not live
+ x = big();
+ goto L;
+ E:
+ x = b;
+ goto M;
+
+ becomes
+
+ x = b;
+ if (test) goto M;
+ x = big();
+ goto L;
+
+ (3) // This one's really only interesting for targets that can do
+ // multiway branching, e.g. IA-64 BBB bundles. For other targets
+ // it results in multiple branches on a cache line, which often
+ // does not sit well with predictors.
+
+ if (test1) goto E; // predicted not taken
+ x = a;
+ if (test2) goto F;
+ ...
+ E:
+ x = b;
+ J:
+
+ becomes
+
+ x = a;
+ if (test1) goto E;
+ if (test2) goto F;
+
+ Notes:
+
+ (A) Don't do (2) if the branch is predicted against the block we're
+ eliminating. Do it anyway if we can eliminate a branch; this requires
+ that the sole successor of the eliminated block postdominate the other
+ side of the if.
+
+ (B) With CE, on (3) we can steal from both sides of the if, creating
+
+ if (test1) x = a;
+ if (!test1) x = b;
+ if (test1) goto J;
+ if (test2) goto F;
+ ...
+ J:
+
+ Again, this is most useful if J postdominates.
+
+ (C) CE substitutes for helpful life information.
+
+ (D) These heuristics need a lot of work. */
+
+/* Tests for case 1 above. */
+
+static int
+find_if_case_1 (test_bb, then_edge, else_edge)
+ basic_block test_bb;
+ edge then_edge, else_edge;
+{
+ basic_block then_bb = then_edge->dest;
+ basic_block else_bb = else_edge->dest;
+ edge then_succ = then_bb->succ;
+ rtx new_lab;
+
+ /* THEN has one successor. */
+ if (!then_succ || then_succ->succ_next != NULL)
+ return FALSE;
+
+ /* THEN does not fall through, but is not strange either. */
+ if (then_succ->flags & (EDGE_COMPLEX | EDGE_FALLTHRU))
+ return FALSE;
+
+ /* THEN has one predecessor. */
+ if (then_bb->pred->pred_next != NULL)
+ return FALSE;
+
+ /* THEN has no label. */
+ if (GET_CODE (then_bb->head) == CODE_LABEL)
+ return FALSE;
+
+ /* ELSE follows THEN. (??? could be moved) */
+ if (else_bb->index != then_bb->index + 1)
+ return FALSE;
+
+ num_possible_if_blocks++;
+ if (rtl_dump_file)
+ fprintf (rtl_dump_file,
+ "\nIF-CASE-1 found, start %d, then %d\n",
+ test_bb->index, then_bb->index);
+
+ /* THEN is small. */
+ if (count_bb_insns (then_bb) > BRANCH_COST)
+ return FALSE;
+
+ /* Find the label for THEN's destination. */
+ if (then_succ->dest == EXIT_BLOCK_PTR)
+ new_lab = NULL_RTX;
+ else
+ {
+ new_lab = JUMP_LABEL (then_bb->end);
+ if (! new_lab)
+ abort ();
+ }
+
+ /* Registers set are dead, or are predicable. */
+ if (! dead_or_predicable (test_bb, then_bb, else_bb, new_lab, 1))
+ return FALSE;
+
+ /* Conversion went ok, including moving the insns and fixing up the
+ jump. Adjust the CFG to match. */
+
+ SET_UPDATE_LIFE (test_bb);
+ bitmap_operation (test_bb->global_live_at_end,
+ else_bb->global_live_at_start,
+ then_bb->global_live_at_end, BITMAP_IOR);
+
+ make_edge (NULL, test_bb, then_succ->dest, 0);
+ flow_delete_block (then_bb);
+ tidy_fallthru_edge (else_edge, test_bb, else_bb);
+
+ num_removed_blocks++;
+ num_updated_if_blocks++;
+
+ return TRUE;
+}
+
+/* Test for case 2 above. */
+
+static int
+find_if_case_2 (test_bb, then_edge, else_edge)
+ basic_block test_bb;
+ edge then_edge, else_edge;
+{
+ basic_block then_bb = then_edge->dest;
+ basic_block else_bb = else_edge->dest;
+ edge else_succ = else_bb->succ;
+ rtx new_lab, note;
+
+ /* ELSE has one successor. */
+ if (!else_succ || else_succ->succ_next != NULL)
+ return FALSE;
+
+ /* ELSE outgoing edge is not complex. */
+ if (else_succ->flags & EDGE_COMPLEX)
+ return FALSE;
+
+ /* ELSE has one predecessor. */
+ if (else_bb->pred->pred_next != NULL)
+ return FALSE;
+
+ /* ELSE has a label we can delete. */
+ if (LABEL_NUSES (else_bb->head) > 1
+ || LABEL_PRESERVE_P (else_bb->head)
+ || LABEL_NAME (else_bb->head))
+ return FALSE;
+
+ /* ELSE is predicted or SUCC(ELSE) postdominates THEN. */
+ note = find_reg_note (test_bb->end, REG_BR_PROB, NULL_RTX);
+ if (note && INTVAL (XEXP (note, 0)) >= REG_BR_PROB_BASE / 2)
+ ;
+ else if (else_succ->dest->index < 0
+ || (then_bb->index >= 0
+ && TEST_BIT (post_dominators[ORIG_INDEX (then_bb)],
+ ORIG_INDEX (else_succ->dest))))
+ ;
+ else
+ return FALSE;
+
+ num_possible_if_blocks++;
+ if (rtl_dump_file)
+ fprintf (rtl_dump_file,
+ "\nIF-CASE-2 found, start %d, else %d\n",
+ test_bb->index, else_bb->index);
+
+ /* ELSE is small. */
+ if (count_bb_insns (else_bb) > BRANCH_COST)
+ return FALSE;
+
+ /* Find the label for ELSE's destination. */
+ if (else_succ->dest == EXIT_BLOCK_PTR)
+ new_lab = NULL_RTX;
+ else
+ {
+ if (else_succ->flags & EDGE_FALLTHRU)
+ {
+ new_lab = else_succ->dest->head;
+ if (GET_CODE (new_lab) != CODE_LABEL)
+ abort ();
+ }
+ else
+ {
+ new_lab = JUMP_LABEL (else_bb->end);
+ if (! new_lab)
+ abort ();
+ }
+ }
+
+ /* Registers set are dead, or are predicable. */
+ if (! dead_or_predicable (test_bb, else_bb, then_bb, new_lab, 0))
+ return FALSE;
+
+ /* Conversion went ok, including moving the insns and fixing up the
+ jump. Adjust the CFG to match. */
+
+ SET_UPDATE_LIFE (test_bb);
+ bitmap_operation (test_bb->global_live_at_end,
+ then_bb->global_live_at_start,
+ else_bb->global_live_at_end, BITMAP_IOR);
+
+ remove_edge (else_edge);
+ make_edge (NULL, test_bb, else_succ->dest, 0);
+ flow_delete_block (else_bb);
+
+ num_removed_blocks++;
+ num_updated_if_blocks++;
+
+ /* ??? We may now fallthru from one of THEN's successors into a join
+ block. Rerun cleanup_cfg? Examine things manually? Wait? */
+
+ return TRUE;
+}
+
+/* A subroutine of dead_or_predicable called through for_each_rtx.
+ Return 1 if a memory is found. */
+
+static int
+find_memory (px, data)
+ rtx *px;
+ void *data ATTRIBUTE_UNUSED;
+{
+ return GET_CODE (*px) == MEM;
+}
+
+/* Used by the code above to perform the actual rtl transformations.
+ Return TRUE if successful.
+
+ TEST_BB is the block containing the conditional branch. MERGE_BB
+ is the block containing the code to manipulate. NEW_DEST is the
+ label TEST_BB should be branching to after the conversion.
+ REVERSEP is true if the sense of the branch should be reversed. */
+
+static int
+dead_or_predicable (test_bb, merge_bb, other_bb, new_dest, reversep)
+ basic_block test_bb, merge_bb, other_bb;
+ rtx new_dest;
+ int reversep;
+{
+ rtx head, end, jump, earliest, old_dest;
+
+ jump = test_bb->end;
+
+ /* Find the extent of the real code in the merge block. */
+ head = merge_bb->head;
+ end = merge_bb->end;
+
+ if (GET_CODE (head) == CODE_LABEL)
+ head = NEXT_INSN (head);
+ if (GET_CODE (head) == NOTE)
+ {
+ if (head == end)
+ {
+ head = end = NULL_RTX;
+ goto no_body;
+ }
+ head = NEXT_INSN (head);
+ }
+
+ if (GET_CODE (end) == JUMP_INSN)
+ {
+ if (head == end)
+ {
+ head = end = NULL_RTX;
+ goto no_body;
+ }
+ end = PREV_INSN (end);
+ }
+
+ if (HAVE_conditional_execution)
+ {
+ /* In the conditional execution case, we have things easy. We know
+ the condition is reversible. We don't have to check life info,
+ because we're going to conditionally execute the code anyway.
+ All that's left is making sure the insns involved can actually
+ be predicated. */
+
+ rtx cond;
+
+ cond = cond_exec_get_condition (jump);
+ if (reversep)
+ cond = gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)),
+ GET_MODE (cond), XEXP (cond, 0),
+ XEXP (cond, 1));
+
+ if (! cond_exec_process_insns (head, end, cond, 0))
+ goto cancel;
+
+ earliest = jump;
+ }
+ else
+ {
+ /* In the non-conditional execution case, we have to verify that there
+ are no trapping operations, no calls, no references to memory, and
+ that any registers modified are dead at the branch site. */
+
+ rtx insn, cond, prev;
+ regset_head merge_set_head, tmp_head, test_live_head, test_set_head;
+ regset merge_set, tmp, test_live, test_set;
+ struct propagate_block_info *pbi;
+ int i, fail = 0;
+
+ /* Check for no calls or trapping operations. */
+ for (insn = head; ; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == CALL_INSN)
+ return FALSE;
+ if (INSN_P (insn))
+ {
+ if (may_trap_p (PATTERN (insn)))
+ return FALSE;
+
+ /* ??? Even non-trapping memories such as stack frame
+ references must be avoided. For stores, we collect
+ no lifetime info; for reads, we'd have to assert
+ true_dependence false against every store in the
+ TEST range. */
+ if (for_each_rtx (&PATTERN (insn), find_memory, NULL))
+ return FALSE;
+ }
+ if (insn == end)
+ break;
+ }
+
+ if (! condjump_p (jump))
+ return FALSE;
+
+ /* Find the extent of the conditional. */
+ cond = noce_get_condition (jump, &earliest);
+ if (! cond)
+ return FALSE;
+
+ /* Collect:
+ MERGE_SET = set of registers set in MERGE_BB
+ TEST_LIVE = set of registers live at EARLIEST
+ TEST_SET = set of registers set between EARLIEST and the
+ end of the block. */
+
+ tmp = INITIALIZE_REG_SET (tmp_head);
+ merge_set = INITIALIZE_REG_SET (merge_set_head);
+ test_live = INITIALIZE_REG_SET (test_live_head);
+ test_set = INITIALIZE_REG_SET (test_set_head);
+
+ /* ??? bb->local_set is only valid during calculate_global_regs_live,
+ so we must recompute usage for MERGE_BB. Not so bad, I suppose,
+ since we've already asserted that MERGE_BB is small. */
+ propagate_block (merge_bb, tmp, merge_set, 0);
+
+ /* For small register class machines, don't lengthen lifetimes of
+ hard registers before reload. */
+ if (SMALL_REGISTER_CLASSES && ! reload_completed)
+ {
+ EXECUTE_IF_SET_IN_BITMAP
+ (merge_set, 0, i,
+ {
+ if (i < FIRST_PSEUDO_REGISTER
+ && ! fixed_regs[i]
+ && ! global_regs[i])
+ fail = 1;
+ });
+ }
+
+ /* For TEST, we're interested in a range of insns, not a whole block.
+ Moreover, we're interested in the insns live from OTHER_BB. */
+
+ COPY_REG_SET (test_live, other_bb->global_live_at_start);
+ pbi = init_propagate_block_info (test_bb, test_live, test_set, 0);
+
+ for (insn = jump; ; insn = prev)
+ {
+ prev = propagate_one_insn (pbi, insn);
+ if (insn == earliest)
+ break;
+ }
+
+ free_propagate_block_info (pbi);
+
+ /* We can perform the transformation if
+ MERGE_SET & (TEST_SET | TEST_LIVE)
+ and
+ TEST_SET & merge_bb->global_live_at_start
+ are empty. */
+
+ bitmap_operation (tmp, test_set, test_live, BITMAP_IOR);
+ bitmap_operation (tmp, tmp, merge_set, BITMAP_AND);
+ EXECUTE_IF_SET_IN_BITMAP(tmp, 0, i, fail = 1);
+
+ bitmap_operation (tmp, test_set, merge_bb->global_live_at_start,
+ BITMAP_AND);
+ EXECUTE_IF_SET_IN_BITMAP(tmp, 0, i, fail = 1);
+
+ FREE_REG_SET (tmp);
+ FREE_REG_SET (merge_set);
+ FREE_REG_SET (test_live);
+ FREE_REG_SET (test_set);
+
+ if (fail)
+ return FALSE;
+ }
+
+ no_body:
+ /* We don't want to use normal invert_jump or redirect_jump because
+ we don't want delete_insn called. Also, we want to do our own
+ change group management. */
+
+ old_dest = JUMP_LABEL (jump);
+ if (reversep
+ ? ! invert_jump_1 (jump, new_dest)
+ : ! redirect_jump_1 (jump, new_dest))
+ goto cancel;
+
+ if (! apply_change_group ())
+ return FALSE;
+
+ if (old_dest)
+ LABEL_NUSES (old_dest) -= 1;
+ if (new_dest)
+ LABEL_NUSES (new_dest) += 1;
+ JUMP_LABEL (jump) = new_dest;
+
+ if (reversep)
+ {
+ rtx note = find_reg_note (jump, REG_BR_PROB, NULL_RTX);
+ if (note)
+ XEXP (note, 0) = GEN_INT (REG_BR_PROB_BASE - INTVAL (XEXP (note, 0)));
+ }
+
+ /* Move the insns out of MERGE_BB to before the branch. */
+ if (end == merge_bb->end)
+ merge_bb->end = merge_bb->head;
+ if (head != NULL)
+ {
+ head = squeeze_notes (head, end);
+ reorder_insns (head, end, PREV_INSN (earliest));
+ }
+ return TRUE;
+
+ cancel:
+ cancel_changes (0);
+ return FALSE;
+}
+
+/* Main entry point for all if-conversion. */
+
+void
+if_convert (life_data_ok)
+ int life_data_ok;
+{
+ int block_num;
+
+ num_possible_if_blocks = 0;
+ num_updated_if_blocks = 0;
+ num_removed_blocks = 0;
+
+ /* Free up basic_block_for_insn so that we don't have to keep it
+ up to date, either here or in merge_blocks_nomove. */
+ free_basic_block_vars (1);
+
+ /* Compute postdominators if we think we'll use them. */
+ post_dominators = NULL;
+ if (HAVE_conditional_execution || life_data_ok)
+ {
+ post_dominators = sbitmap_vector_alloc (n_basic_blocks, n_basic_blocks);
+ compute_flow_dominators (NULL, post_dominators);
+ }
+
+ /* Record initial block numbers. */
+ for (block_num = 0; block_num < n_basic_blocks; block_num++)
+ SET_ORIG_INDEX (BASIC_BLOCK (block_num), block_num);
+
+ /* Go through each of the basic blocks looking for things to convert. */
+ for (block_num = 0; block_num < n_basic_blocks; )
+ {
+ basic_block bb = BASIC_BLOCK (block_num);
+ if (find_if_header (bb))
+ block_num = bb->index;
+ else
+ block_num++;
+ }
+
+ sbitmap_vector_free (post_dominators);
+
+ if (rtl_dump_file)
+ fflush (rtl_dump_file);
+
+ /* Rebuild basic_block_for_insn for update_life_info and for gcse. */
+ compute_bb_for_insn (get_max_uid ());
+
+ /* Rebuild life info for basic blocks that require it. */
+ if (num_removed_blocks && life_data_ok)
+ {
+ sbitmap update_life_blocks = sbitmap_alloc (n_basic_blocks);
+ sbitmap_zero (update_life_blocks);
+
+ /* If we allocated new pseudos, we must resize the array for sched1. */
+ if (max_regno < max_reg_num ())
+ {
+ max_regno = max_reg_num ();
+ allocate_reg_info (max_regno, FALSE, FALSE);
+ }
+
+ for (block_num = 0; block_num < n_basic_blocks; block_num++)
+ if (UPDATE_LIFE (BASIC_BLOCK (block_num)))
+ SET_BIT (update_life_blocks, block_num);
+
+ count_or_remove_death_notes (update_life_blocks, 1);
+ update_life_info (update_life_blocks, UPDATE_LIFE_LOCAL,
+ PROP_DEATH_NOTES);
+
+ sbitmap_free (update_life_blocks);
+ }
+
+ /* Write the final stats. */
+ if (rtl_dump_file && num_possible_if_blocks > 0)
+ {
+ fprintf (rtl_dump_file,
+ "\n%d possible IF blocks searched.\n",
+ num_possible_if_blocks);
+ fprintf (rtl_dump_file,
+ "%d IF blocks converted.\n",
+ num_updated_if_blocks);
+ fprintf (rtl_dump_file,
+ "%d basic blocks deleted.\n\n\n",
+ num_removed_blocks);
+ }
+
+ verify_flow_info ();
+}
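
At the source level, the conversions this new file drives amount to replacing a forward branch around simple assignments with straight-line conditional code that the target can then emit as a conditional move or a predicated instruction. The following is a minimal, self-contained C sketch of that equivalence only -- toy functions for illustration, not code from the patch:

#include <assert.h>

/* The branchy shape that if-conversion looks for.  */
static int
branchy (int p, int a, int b)
{
  int x;
  if (p)
    x = a;
  else
    x = b;
  return x;
}

/* The branch-free equivalent, a candidate for a conditional move.  */
static int
branchless (int p, int a, int b)
{
  return p ? a : b;
}

int
main (void)
{
  assert (branchy (1, 3, 7) == branchless (1, 3, 7));
  assert (branchy (0, 3, 7) == branchless (0, 3, 7));
  return 0;
}

Both functions return the same value for every input; the point of the pass is to let the backend pick the branch-free form when it is cheaper than the jump.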
diff --git a/gcc/jump.c b/gcc/jump.c
index 3e98386..baffdf0 100644
--- a/gcc/jump.c
+++ b/gcc/jump.c
@@ -128,9 +128,6 @@ static int delete_labelref_insn PARAMS ((rtx, rtx, int));
static void mark_modified_reg PARAMS ((rtx, rtx, void *));
static void redirect_tablejump PARAMS ((rtx, rtx));
static void jump_optimize_1 PARAMS ((rtx, int, int, int, int, int));
-#if ! defined(HAVE_cc0) && ! defined(HAVE_conditional_arithmetic)
-static rtx find_insert_position PARAMS ((rtx, rtx));
-#endif
static int returnjump_p_1 PARAMS ((rtx *, void *));
static void delete_prior_computation PARAMS ((rtx, rtx));
@@ -300,9 +297,10 @@ jump_optimize_1 (f, cross_jump, noop_moves, after_regscan,
for (insn = f; insn; insn = next)
{
rtx reallabelprev;
- rtx temp, temp1, temp2 = NULL_RTX, temp3, temp4, temp5, temp6;
+ rtx temp, temp1, temp2 = NULL_RTX;
+ rtx temp4 ATTRIBUTE_UNUSED;
rtx nlabel;
- int this_is_simplejump, this_is_condjump, reversep = 0;
+ int this_is_simplejump, this_is_condjump;
int this_is_condjump_in_parallel;
next = NEXT_INSN (insn);
@@ -514,1437 +512,18 @@ jump_optimize_1 (f, cross_jump, noop_moves, after_regscan,
next = NEXT_INSN (insn);
}
- /* Simplify if (...) x = a; else x = b; by converting it
- to x = b; if (...) x = a;
- if B is sufficiently simple, the test doesn't involve X,
- and nothing in the test modifies B or X.
-
- If we have small register classes, we also can't do this if X
- is a hard register.
-
- If the "x = b;" insn has any REG_NOTES, we don't do this because
- of the possibility that we are running after CSE and there is a
- REG_EQUAL note that is only valid if the branch has already been
- taken. If we move the insn with the REG_EQUAL note, we may
- fold the comparison to always be false in a later CSE pass.
- (We could also delete the REG_NOTES when moving the insn, but it
- seems simpler to not move it.) An exception is that we can move
- the insn if the only note is a REG_EQUAL or REG_EQUIV whose
- value is the same as "b".
-
- INSN is the branch over the `else' part.
-
- We set:
-
- TEMP to the jump insn preceding "x = a;"
- TEMP1 to X
- TEMP2 to the insn that sets "x = b;"
- TEMP3 to the insn that sets "x = a;"
- TEMP4 to the set of "x = b"; */
-
- if (this_is_simplejump
- && (temp3 = prev_active_insn (insn)) != 0
- && GET_CODE (temp3) == INSN
- && (temp4 = single_set (temp3)) != 0
- && GET_CODE (temp1 = SET_DEST (temp4)) == REG
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
- && (temp2 = next_active_insn (insn)) != 0
- && GET_CODE (temp2) == INSN
- && (temp4 = single_set (temp2)) != 0
- && rtx_equal_p (SET_DEST (temp4), temp1)
- && ! side_effects_p (SET_SRC (temp4))
- && ! may_trap_p (SET_SRC (temp4))
- && (REG_NOTES (temp2) == 0
- || ((REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUAL
- || REG_NOTE_KIND (REG_NOTES (temp2)) == REG_EQUIV)
- && XEXP (REG_NOTES (temp2), 1) == 0
- && rtx_equal_p (XEXP (REG_NOTES (temp2), 0),
- SET_SRC (temp4))))
- && (temp = prev_active_insn (temp3)) != 0
- && condjump_p (temp) && ! simplejump_p (temp)
- /* TEMP must skip over the "x = a;" insn */
- && prev_real_insn (JUMP_LABEL (temp)) == insn
- && no_labels_between_p (insn, JUMP_LABEL (temp))
- /* There must be no other entries to the "x = b;" insn. */
- && no_labels_between_p (JUMP_LABEL (temp), temp2)
- /* INSN must either branch to the insn after TEMP2 or the insn
- after TEMP2 must branch to the same place as INSN. */
- && (reallabelprev == temp2
- || ((temp5 = next_active_insn (temp2)) != 0
- && simplejump_p (temp5)
- && JUMP_LABEL (temp5) == JUMP_LABEL (insn))))
- {
- /* The test expression, X, may be a complicated test with
- multiple branches. See if we can find all the uses of
- the label that TEMP branches to without hitting a CALL_INSN
- or a jump to somewhere else. */
- rtx target = JUMP_LABEL (temp);
- int nuses = LABEL_NUSES (target);
- rtx p;
-#ifdef HAVE_cc0
- rtx q;
-#endif
-
- /* Set P to the first jump insn that goes around "x = a;". */
- for (p = temp; nuses && p; p = prev_nonnote_insn (p))
- {
- if (GET_CODE (p) == JUMP_INSN)
- {
- if (condjump_p (p) && ! simplejump_p (p)
- && JUMP_LABEL (p) == target)
- {
- nuses--;
- if (nuses == 0)
- break;
- }
- else
- break;
- }
- else if (GET_CODE (p) == CALL_INSN)
- break;
- }
-
-#ifdef HAVE_cc0
- /* We cannot insert anything between a set of cc and its use
- so if P uses cc0, we must back up to the previous insn. */
- q = prev_nonnote_insn (p);
- if (q && GET_RTX_CLASS (GET_CODE (q)) == 'i'
- && sets_cc0_p (PATTERN (q)))
- p = q;
-#endif
-
- if (p)
- p = PREV_INSN (p);
-
- /* If we found all the uses and there was no data conflict, we
- can move the assignment unless we can branch into the middle
- from somewhere. */
- if (nuses == 0 && p
- && no_labels_between_p (p, insn)
- && ! reg_referenced_between_p (temp1, p, NEXT_INSN (temp3))
- && ! reg_set_between_p (temp1, p, temp3)
- && (GET_CODE (SET_SRC (temp4)) == CONST_INT
- || ! modified_between_p (SET_SRC (temp4), p, temp2))
- /* Verify that registers used by the jump are not clobbered
- by the instruction being moved. */
- && ! regs_set_between_p (PATTERN (temp),
- PREV_INSN (temp2),
- NEXT_INSN (temp2)))
- {
- emit_insn_after_with_line_notes (PATTERN (temp2), p, temp2);
- delete_insn (temp2);
-
- /* Set NEXT to an insn that we know won't go away. */
- next = next_active_insn (insn);
-
- /* Delete the jump around the set. Note that we must do
- this before we redirect the test jumps so that it won't
- delete the code immediately following the assignment
- we moved (which might be a jump). */
-
- delete_insn (insn);
-
- /* We either have two consecutive labels or a jump to
- a jump, so adjust all the JUMP_INSNs to branch to where
- INSN branches to. */
- for (p = NEXT_INSN (p); p != next; p = NEXT_INSN (p))
- if (GET_CODE (p) == JUMP_INSN)
- redirect_jump (p, target);
-
- changed = 1;
- next = NEXT_INSN (insn);
- continue;
- }
- }
-
- /* Simplify if (...) { x = a; goto l; } x = b; by converting it
- to x = a; if (...) goto l; x = b;
- if A is sufficiently simple, the test doesn't involve X,
- and nothing in the test modifies A or X.
-
- If we have small register classes, we also can't do this if X
- is a hard register.
-
- If the "x = a;" insn has any REG_NOTES, we don't do this because
- of the possibility that we are running after CSE and there is a
- REG_EQUAL note that is only valid if the branch has already been
- taken. If we move the insn with the REG_EQUAL note, we may
- fold the comparison to always be false in a later CSE pass.
- (We could also delete the REG_NOTES when moving the insn, but it
- seems simpler to not move it.) An exception is that we can move
- the insn if the only note is a REG_EQUAL or REG_EQUIV whose
- value is the same as "a".
-
- INSN is the goto.
-
- We set:
-
- TEMP to the jump insn preceding "x = a;"
- TEMP1 to X
- TEMP2 to the insn that sets "x = b;"
- TEMP3 to the insn that sets "x = a;"
- TEMP4 to the set of "x = a"; */
-
- if (this_is_simplejump
- && (temp2 = next_active_insn (insn)) != 0
- && GET_CODE (temp2) == INSN
- && (temp4 = single_set (temp2)) != 0
- && GET_CODE (temp1 = SET_DEST (temp4)) == REG
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
- && (temp3 = prev_active_insn (insn)) != 0
- && GET_CODE (temp3) == INSN
- && (temp4 = single_set (temp3)) != 0
- && rtx_equal_p (SET_DEST (temp4), temp1)
- && ! side_effects_p (SET_SRC (temp4))
- && ! may_trap_p (SET_SRC (temp4))
- && (REG_NOTES (temp3) == 0
- || ((REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUAL
- || REG_NOTE_KIND (REG_NOTES (temp3)) == REG_EQUIV)
- && XEXP (REG_NOTES (temp3), 1) == 0
- && rtx_equal_p (XEXP (REG_NOTES (temp3), 0),
- SET_SRC (temp4))))
- && (temp = prev_active_insn (temp3)) != 0
- && condjump_p (temp) && ! simplejump_p (temp)
- /* TEMP must skip over the "x = a;" insn */
- && prev_real_insn (JUMP_LABEL (temp)) == insn
- && no_labels_between_p (temp, insn))
- {
- rtx prev_label = JUMP_LABEL (temp);
- rtx insert_after = prev_nonnote_insn (temp);
-
-#ifdef HAVE_cc0
- /* We cannot insert anything between a set of cc and its use. */
- if (insert_after && GET_RTX_CLASS (GET_CODE (insert_after)) == 'i'
- && sets_cc0_p (PATTERN (insert_after)))
- insert_after = prev_nonnote_insn (insert_after);
-#endif
- ++LABEL_NUSES (prev_label);
-
- if (insert_after
- && no_labels_between_p (insert_after, temp)
- && ! reg_referenced_between_p (temp1, insert_after, temp3)
- && ! reg_referenced_between_p (temp1, temp3,
- NEXT_INSN (temp2))
- && ! reg_set_between_p (temp1, insert_after, temp)
- && ! modified_between_p (SET_SRC (temp4), insert_after, temp)
- /* Verify that registers used by the jump are not clobbered
- by the instruction being moved. */
- && ! regs_set_between_p (PATTERN (temp),
- PREV_INSN (temp3),
- NEXT_INSN (temp3))
- && invert_jump (temp, JUMP_LABEL (insn)))
- {
- emit_insn_after_with_line_notes (PATTERN (temp3),
- insert_after, temp3);
- delete_insn (temp3);
- delete_insn (insn);
- /* Set NEXT to an insn that we know won't go away. */
- next = temp2;
- changed = 1;
- }
- if (prev_label && --LABEL_NUSES (prev_label) == 0)
- delete_insn (prev_label);
- if (changed)
- continue;
- }
-
-#if !defined(HAVE_cc0) && !defined(HAVE_conditional_arithmetic)
-
- /* If we have if (...) x = exp; and branches are expensive,
- EXP is a single insn, does not have any side effects, cannot
- trap, and is not too costly, convert this to
- t = exp; if (...) x = t;
-
- Don't do this when we have CC0 because it is unlikely to help
- and we'd need to worry about where to place the new insn and
- the potential for conflicts. We also can't do this when we have
- notes on the insn for the same reason as above.
-
- If we have conditional arithmetic, this will make this
- harder to optimize later and isn't needed, so don't do it
- in that case either.
-
- We set:
-
- TEMP to the "x = exp;" insn.
- TEMP1 to the single set in the "x = exp;" insn.
- TEMP2 to "x". */
-
- if (! reload_completed
- && this_is_condjump && ! this_is_simplejump
- && BRANCH_COST >= 3
- && (temp = next_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN
- && REG_NOTES (temp) == 0
- && (reallabelprev == temp
- || ((temp2 = next_active_insn (temp)) != 0
- && simplejump_p (temp2)
- && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
- && (temp1 = single_set (temp)) != 0
- && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
- && GET_CODE (SET_SRC (temp1)) != REG
- && GET_CODE (SET_SRC (temp1)) != SUBREG
- && GET_CODE (SET_SRC (temp1)) != CONST_INT
- && ! side_effects_p (SET_SRC (temp1))
- && ! may_trap_p (SET_SRC (temp1))
- && rtx_cost (SET_SRC (temp1), SET) < 10)
- {
- rtx new = gen_reg_rtx (GET_MODE (temp2));
-
- if ((temp3 = find_insert_position (insn, temp))
- && validate_change (temp, &SET_DEST (temp1), new, 0))
- {
- next = emit_insn_after (gen_move_insn (temp2, new), insn);
- emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (temp3), temp);
- delete_insn (temp);
- reallabelprev = prev_active_insn (JUMP_LABEL (insn));
-
- if (after_regscan)
- {
- reg_scan_update (temp3, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
- }
- }
-
- /* Similarly, if it takes two insns to compute EXP but they
- have the same destination. Here TEMP3 will be the second
- insn and TEMP4 the SET from that insn. */
-
- if (! reload_completed
- && this_is_condjump && ! this_is_simplejump
- && BRANCH_COST >= 4
- && (temp = next_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN
- && REG_NOTES (temp) == 0
- && (temp3 = next_nonnote_insn (temp)) != 0
- && GET_CODE (temp3) == INSN
- && REG_NOTES (temp3) == 0
- && (reallabelprev == temp3
- || ((temp2 = next_active_insn (temp3)) != 0
- && simplejump_p (temp2)
- && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
- && (temp1 = single_set (temp)) != 0
- && (temp2 = SET_DEST (temp1), GET_CODE (temp2) == REG)
- && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
- && ! side_effects_p (SET_SRC (temp1))
- && ! may_trap_p (SET_SRC (temp1))
- && rtx_cost (SET_SRC (temp1), SET) < 10
- && (temp4 = single_set (temp3)) != 0
- && rtx_equal_p (SET_DEST (temp4), temp2)
- && ! side_effects_p (SET_SRC (temp4))
- && ! may_trap_p (SET_SRC (temp4))
- && rtx_cost (SET_SRC (temp4), SET) < 10)
- {
- rtx new = gen_reg_rtx (GET_MODE (temp2));
-
- if ((temp5 = find_insert_position (insn, temp))
- && (temp6 = find_insert_position (insn, temp3))
- && validate_change (temp, &SET_DEST (temp1), new, 0))
- {
- /* Use the earliest of temp5 and temp6. */
- if (temp5 != insn)
- temp6 = temp5;
- next = emit_insn_after (gen_move_insn (temp2, new), insn);
- emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (temp6), temp);
- emit_insn_after_with_line_notes
- (replace_rtx (PATTERN (temp3), temp2, new),
- PREV_INSN (temp6), temp3);
- delete_insn (temp);
- delete_insn (temp3);
- reallabelprev = prev_active_insn (JUMP_LABEL (insn));
-
- if (after_regscan)
- {
- reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
- }
- }
-
- /* Finally, handle the case where two insns are used to
- compute EXP but a temporary register is used. Here we must
- ensure that the temporary register is not used anywhere else. */
-
- if (! reload_completed
- && after_regscan
- && this_is_condjump && ! this_is_simplejump
- && BRANCH_COST >= 4
- && (temp = next_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN
- && REG_NOTES (temp) == 0
- && (temp3 = next_nonnote_insn (temp)) != 0
- && GET_CODE (temp3) == INSN
- && REG_NOTES (temp3) == 0
- && (reallabelprev == temp3
- || ((temp2 = next_active_insn (temp3)) != 0
- && simplejump_p (temp2)
- && JUMP_LABEL (temp2) == JUMP_LABEL (insn)))
- && (temp1 = single_set (temp)) != 0
- && (temp5 = SET_DEST (temp1),
- (GET_CODE (temp5) == REG
- || (GET_CODE (temp5) == SUBREG
- && (temp5 = SUBREG_REG (temp5),
- GET_CODE (temp5) == REG))))
- && REGNO (temp5) >= FIRST_PSEUDO_REGISTER
- && REGNO_FIRST_UID (REGNO (temp5)) == INSN_UID (temp)
- && REGNO_LAST_UID (REGNO (temp5)) == INSN_UID (temp3)
- && ! side_effects_p (SET_SRC (temp1))
- && ! may_trap_p (SET_SRC (temp1))
- && rtx_cost (SET_SRC (temp1), SET) < 10
- && (temp4 = single_set (temp3)) != 0
- && (temp2 = SET_DEST (temp4), GET_CODE (temp2) == REG)
- && GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp2) >= FIRST_PSEUDO_REGISTER)
- && rtx_equal_p (SET_DEST (temp4), temp2)
- && ! side_effects_p (SET_SRC (temp4))
- && ! may_trap_p (SET_SRC (temp4))
- && rtx_cost (SET_SRC (temp4), SET) < 10)
- {
- rtx new = gen_reg_rtx (GET_MODE (temp2));
-
- if ((temp5 = find_insert_position (insn, temp))
- && (temp6 = find_insert_position (insn, temp3))
- && validate_change (temp3, &SET_DEST (temp4), new, 0))
- {
- /* Use the earliest of temp5 and temp6. */
- if (temp5 != insn)
- temp6 = temp5;
- next = emit_insn_after (gen_move_insn (temp2, new), insn);
- emit_insn_after_with_line_notes (PATTERN (temp),
- PREV_INSN (temp6), temp);
- emit_insn_after_with_line_notes (PATTERN (temp3),
- PREV_INSN (temp6), temp3);
- delete_insn (temp);
- delete_insn (temp3);
- reallabelprev = prev_active_insn (JUMP_LABEL (insn));
-
- if (after_regscan)
- {
- reg_scan_update (temp6, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
- }
- }
-#endif /* HAVE_cc0 */
-
-#ifdef HAVE_conditional_arithmetic
- /* ??? This is disabled in genconfig, as this simple-minded
- transformation can incredibly lengthen register lifetimes.
-
- Consider this example:
-
- 234 (set (pc)
- (if_then_else (ne (reg:DI 149) (const_int 0 [0x0]))
- (label_ref 248) (pc)))
- 237 (set (reg/i:DI 0 $0) (const_int 1 [0x1]))
- 239 (set (pc) (label_ref 2382))
- 248 (code_label ("yybackup"))
-
- This will be transformed to:
-
- 237 (set (reg/i:DI 0 $0)
- (if_then_else:DI (eq (reg:DI 149) (const_int 0 [0x0]))
- (const_int 1 [0x1]) (reg/i:DI 0 $0)))
- 239 (set (pc)
- (if_then_else (eq (reg:DI 149) (const_int 0 [0x0]))
- (label_ref 2382) (pc)))
-
- which, from this narrow viewpoint looks fine. Except that
-	 between this and 3 other occurrences of the same pattern, $0
- is now live for basically the entire function, and we'll
- get an abort in caller_save.
-
- Any replacement for this code should recall that a set of
- a register that is not live need not, and indeed should not,
- be conditionalized. Either that, or delay the transformation
- until after register allocation. */
-
- /* See if this is a conditional jump around a small number of
- instructions that we can conditionalize. Don't do this before
- the initial CSE pass or after reload.
-
- We reject any insns that have side effects or may trap.
- Strictly speaking, this is not needed since the machine may
- support conditionalizing these too, but we won't deal with that
- now. Specifically, this means that we can't conditionalize a
- CALL_INSN, which some machines, such as the ARC, can do, but
- this is a very minor optimization. */
- if (this_is_condjump && ! this_is_simplejump
- && cse_not_expected && ! reload_completed
- && BRANCH_COST > 2
- && can_reverse_comparison_p (XEXP (SET_SRC (PATTERN (insn)), 0),
- insn))
- {
- rtx ourcond = XEXP (SET_SRC (PATTERN (insn)), 0);
- int num_insns = 0;
- char *storage = (char *) oballoc (0);
- int last_insn = 0, failed = 0;
- rtx changed_jump = 0;
-
- ourcond = gen_rtx (reverse_condition (GET_CODE (ourcond)),
- VOIDmode, XEXP (ourcond, 0),
- XEXP (ourcond, 1));
-
- /* Scan forward BRANCH_COST real insns looking for the JUMP_LABEL
- of this insn. We see if we think we can conditionalize the
- insns we pass. For now, we only deal with insns that have
- one SET. We stop after an insn that modifies anything in
- OURCOND, if we have too many insns, or if we have an insn
-	     with a side effect or that may trap.  Note that we will
- be modifying any unconditional jumps we encounter to be
- conditional; this will have the effect of also doing this
- optimization on the "else" the next time around. */
- for (temp1 = NEXT_INSN (insn);
- num_insns <= BRANCH_COST && ! failed && temp1 != 0
- && GET_CODE (temp1) != CODE_LABEL;
- temp1 = NEXT_INSN (temp1))
- {
- /* Ignore everything but an active insn. */
- if (GET_RTX_CLASS (GET_CODE (temp1)) != 'i'
- || GET_CODE (PATTERN (temp1)) == USE
- || GET_CODE (PATTERN (temp1)) == CLOBBER)
- continue;
-
- /* If this was an unconditional jump, record it since we'll
- need to remove the BARRIER if we succeed. We can only
- have one such jump since there must be a label after
- the BARRIER and it's either ours, in which case it's the
- only one or some other, in which case we'd fail.
- Likewise if it's a CALL_INSN followed by a BARRIER. */
-
- if (simplejump_p (temp1)
- || (GET_CODE (temp1) == CALL_INSN
- && NEXT_INSN (temp1) != 0
- && GET_CODE (NEXT_INSN (temp1)) == BARRIER))
- {
- if (changed_jump == 0)
- changed_jump = temp1;
- else
- changed_jump
- = gen_rtx_INSN_LIST (VOIDmode, temp1, changed_jump);
- }
-
- /* See if we are allowed another insn and if this insn
-	     is one we think we may be able to handle.  */
- if (++num_insns > BRANCH_COST
- || last_insn
- || (((temp2 = single_set (temp1)) == 0
- || side_effects_p (SET_SRC (temp2))
- || may_trap_p (SET_SRC (temp2)))
- && GET_CODE (temp1) != CALL_INSN))
- failed = 1;
- else if (temp2 != 0)
- validate_change (temp1, &SET_SRC (temp2),
- gen_rtx_IF_THEN_ELSE
- (GET_MODE (SET_DEST (temp2)),
- copy_rtx (ourcond),
- SET_SRC (temp2), SET_DEST (temp2)),
- 1);
- else
- {
- /* This is a CALL_INSN that doesn't have a SET. */
- rtx *call_loc = &PATTERN (temp1);
-
- if (GET_CODE (*call_loc) == PARALLEL)
- call_loc = &XVECEXP (*call_loc, 0, 0);
-
- validate_change (temp1, call_loc,
- gen_rtx_IF_THEN_ELSE
- (VOIDmode, copy_rtx (ourcond),
- *call_loc, const0_rtx),
- 1);
- }
-
-
- if (modified_in_p (ourcond, temp1))
- last_insn = 1;
- }
-
- /* If we've reached our jump label, haven't failed, and all
- the changes above are valid, we can delete this jump
- insn. Also remove a BARRIER after any jump that used
- to be unconditional and remove any REG_EQUAL or REG_EQUIV
- that might have previously been present on insns we
- made conditional. */
- if (temp1 == JUMP_LABEL (insn) && ! failed
- && apply_change_group ())
- {
- for (temp1 = NEXT_INSN (insn); temp1 != JUMP_LABEL (insn);
- temp1 = NEXT_INSN (temp1))
- if (GET_RTX_CLASS (GET_CODE (temp1)) == 'i')
- for (temp2 = REG_NOTES (temp1); temp2 != 0;
- temp2 = XEXP (temp2, 1))
- if (REG_NOTE_KIND (temp2) == REG_EQUAL
- || REG_NOTE_KIND (temp2) == REG_EQUIV)
- remove_note (temp1, temp2);
-
- if (changed_jump != 0)
- {
- while (GET_CODE (changed_jump) == INSN_LIST)
- {
- delete_barrier (NEXT_INSN (XEXP (changed_jump, 0)));
- changed_jump = XEXP (changed_jump, 1);
- }
-
- delete_barrier (NEXT_INSN (changed_jump));
- }
-
- delete_insn (insn);
- changed = 1;
- continue;
- }
- else
- {
- cancel_changes (0);
- obfree (storage);
- }
- }
-#endif
- /* If branches are expensive, convert
- if (foo) bar++; to bar += (foo != 0);
- and similarly for "bar--;"
-
- INSN is the conditional branch around the arithmetic. We set:
-
- TEMP is the arithmetic insn.
- TEMP1 is the SET doing the arithmetic.
- TEMP2 is the operand being incremented or decremented.
- TEMP3 to the condition being tested.
- TEMP4 to the earliest insn used to find the condition. */
-
- if ((BRANCH_COST >= 2
-#ifdef HAVE_incscc
- || HAVE_incscc
-#endif
-#ifdef HAVE_decscc
- || HAVE_decscc
-#endif
- )
- && ! reload_completed
- && this_is_condjump && ! this_is_simplejump
- && (temp = next_nonnote_insn (insn)) != 0
- && (temp1 = single_set (temp)) != 0
- && (temp2 = SET_DEST (temp1),
- GET_MODE_CLASS (GET_MODE (temp2)) == MODE_INT)
- && GET_CODE (SET_SRC (temp1)) == PLUS
- && (XEXP (SET_SRC (temp1), 1) == const1_rtx
- || XEXP (SET_SRC (temp1), 1) == constm1_rtx)
- && rtx_equal_p (temp2, XEXP (SET_SRC (temp1), 0))
- && ! side_effects_p (temp2)
- && ! may_trap_p (temp2)
- /* INSN must either branch to the insn after TEMP or the insn
- after TEMP must branch to the same place as INSN. */
- && (reallabelprev == temp
- || ((temp3 = next_active_insn (temp)) != 0
- && simplejump_p (temp3)
- && JUMP_LABEL (temp3) == JUMP_LABEL (insn)))
- && (temp3 = get_condition (insn, &temp4)) != 0
- /* We must be comparing objects whose modes imply the size.
- We could handle BLKmode if (1) emit_store_flag could
- and (2) we could find the size reliably. */
- && GET_MODE (XEXP (temp3, 0)) != BLKmode
- && can_reverse_comparison_p (temp3, insn))
- {
- rtx temp6, target = 0, seq, init_insn = 0, init = temp2;
- enum rtx_code code = reverse_condition (GET_CODE (temp3));
-
- start_sequence ();
-
- /* It must be the case that TEMP2 is not modified in the range
- [TEMP4, INSN). The one exception we make is if the insn
- before INSN sets TEMP2 to something which is also unchanged
- in that range. In that case, we can move the initialization
- into our sequence. */
-
- if ((temp5 = prev_active_insn (insn)) != 0
- && no_labels_between_p (temp5, insn)
- && GET_CODE (temp5) == INSN
- && (temp6 = single_set (temp5)) != 0
- && rtx_equal_p (temp2, SET_DEST (temp6))
- && (CONSTANT_P (SET_SRC (temp6))
- || GET_CODE (SET_SRC (temp6)) == REG
- || GET_CODE (SET_SRC (temp6)) == SUBREG))
- {
- emit_insn (PATTERN (temp5));
- init_insn = temp5;
- init = SET_SRC (temp6);
- }
-
- if (CONSTANT_P (init)
- || ! reg_set_between_p (init, PREV_INSN (temp4), insn))
- target = emit_store_flag (gen_reg_rtx (GET_MODE (temp2)), code,
- XEXP (temp3, 0), XEXP (temp3, 1),
- VOIDmode,
- (code == LTU || code == LEU
- || code == GTU || code == GEU), 1);
-
- /* If we can do the store-flag, do the addition or
- subtraction. */
-
- if (target)
- target = expand_binop (GET_MODE (temp2),
- (XEXP (SET_SRC (temp1), 1) == const1_rtx
- ? add_optab : sub_optab),
- temp2, target, temp2, 0, OPTAB_WIDEN);
-
- if (target != 0)
- {
- /* Put the result back in temp2 in case it isn't already.
-		 Then replace the jump, possibly a CC0-setting insn in
- front of the jump, and TEMP, with the sequence we have
- made. */
-
- if (target != temp2)
- emit_move_insn (temp2, target);
-
- seq = get_insns ();
- end_sequence ();
-
- emit_insns_before (seq, temp4);
- delete_insn (temp);
-
- if (init_insn)
- delete_insn (init_insn);
-
- next = NEXT_INSN (insn);
-#ifdef HAVE_cc0
- delete_insn (prev_nonnote_insn (insn));
-#endif
- delete_insn (insn);
-
- if (after_regscan)
- {
- reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
-
- changed = 1;
- continue;
- }
- else
- end_sequence ();
- }
-
- /* Try to use a conditional move (if the target has them), or a
- store-flag insn. If the target has conditional arithmetic as
- well as conditional move, the above code will have done something.
- Note that we prefer the above code since it is more general: the
- code below can make changes that require work to undo.
-
- The general case here is:
-
- 1) x = a; if (...) x = b; and
- 2) if (...) x = b;
-
- If the jump would be faster, the machine should not have defined
-	 the movcc or scc insns!  These cases are often made by the
- previous optimization.
-
- The second case is treated as x = x; if (...) x = b;.
-
- INSN here is the jump around the store. We set:
-
- TEMP to the "x op= b;" insn.
- TEMP1 to X.
- TEMP2 to B.
- TEMP3 to A (X in the second case).
- TEMP4 to the condition being tested.
- TEMP5 to the earliest insn used to find the condition.
- TEMP6 to the SET of TEMP. */
-
- if (/* We can't do this after reload has completed. */
- ! reload_completed
-#ifdef HAVE_conditional_arithmetic
- /* Defer this until after CSE so the above code gets the
- first crack at it. */
- && cse_not_expected
-#endif
- && this_is_condjump && ! this_is_simplejump
- /* Set TEMP to the "x = b;" insn. */
- && (temp = next_nonnote_insn (insn)) != 0
- && GET_CODE (temp) == INSN
- && (temp6 = single_set (temp)) != NULL_RTX
- && GET_CODE (temp1 = SET_DEST (temp6)) == REG
- && (! SMALL_REGISTER_CLASSES
- || REGNO (temp1) >= FIRST_PSEUDO_REGISTER)
- && ! side_effects_p (temp2 = SET_SRC (temp6))
- && ! may_trap_p (temp2)
- /* Allow either form, but prefer the former if both apply.
- There is no point in using the old value of TEMP1 if
- it is a register, since cse will alias them. It can
- lose if the old value were a hard register since CSE
- won't replace hard registers. Avoid using TEMP3 if
- small register classes and it is a hard register. */
- && (((temp3 = reg_set_last (temp1, insn)) != 0
- && ! (SMALL_REGISTER_CLASSES && GET_CODE (temp3) == REG
- && REGNO (temp3) < FIRST_PSEUDO_REGISTER))
- /* Make the latter case look like x = x; if (...) x = b; */
- || (temp3 = temp1, 1))
- /* INSN must either branch to the insn after TEMP or the insn
- after TEMP must branch to the same place as INSN. */
- && (reallabelprev == temp
- || ((temp4 = next_active_insn (temp)) != 0
- && simplejump_p (temp4)
- && JUMP_LABEL (temp4) == JUMP_LABEL (insn)))
- && (temp4 = get_condition (insn, &temp5)) != 0
- /* We must be comparing objects whose modes imply the size.
- We could handle BLKmode if (1) emit_store_flag could
- and (2) we could find the size reliably. */
- && GET_MODE (XEXP (temp4, 0)) != BLKmode
- /* Even if branches are cheap, the store_flag optimization
- can win when the operation to be performed can be
- expressed directly. */
-#ifdef HAVE_cc0
- /* If the previous insn sets CC0 and something else, we can't
- do this since we are going to delete that insn. */
-
- && ! ((temp6 = prev_nonnote_insn (insn)) != 0
- && GET_CODE (temp6) == INSN
- && (sets_cc0_p (PATTERN (temp6)) == -1
- || (sets_cc0_p (PATTERN (temp6)) == 1
- && FIND_REG_INC_NOTE (temp6, NULL_RTX))))
-#endif
- )
- {
-#ifdef HAVE_conditional_move
- /* First try a conditional move. */
- {
- enum rtx_code code = GET_CODE (temp4);
- rtx var = temp1;
- rtx cond0, cond1, aval, bval;
- rtx target, new_insn;
-
- /* Copy the compared variables into cond0 and cond1, so that
- any side effects performed in or after the old comparison,
- will not affect our compare which will come later. */
- /* ??? Is it possible to just use the comparison in the jump
- insn? After all, we're going to delete it. We'd have
- to modify emit_conditional_move to take a comparison rtx
- instead or write a new function. */
-
- /* We want the target to be able to simplify comparisons with
- zero (and maybe other constants as well), so don't create
- pseudos for them. There's no need to either. */
- if (GET_CODE (XEXP (temp4, 0)) == CONST_INT
- || GET_CODE (XEXP (temp4, 0)) == CONST_DOUBLE)
- cond0 = XEXP (temp4, 0);
- else
- cond0 = gen_reg_rtx (GET_MODE (XEXP (temp4, 0)));
-
- if (GET_CODE (XEXP (temp4, 1)) == CONST_INT
- || GET_CODE (XEXP (temp4, 1)) == CONST_DOUBLE)
- cond1 = XEXP (temp4, 1);
- else
- cond1 = gen_reg_rtx (GET_MODE (XEXP (temp4, 1)));
-
- /* Careful about copying these values -- an IOR or what may
- need to do other things, like clobber flags. */
- /* ??? Assume for the moment that AVAL is ok. */
- aval = temp3;
-
- start_sequence ();
-
- /* We're dealing with a single_set insn with no side effects
- on SET_SRC. We do need to be reasonably certain that if
- we need to force BVAL into a register that we won't
- clobber the flags -- general_operand should suffice. */
- if (general_operand (temp2, GET_MODE (var)))
- bval = temp2;
- else
- {
- bval = gen_reg_rtx (GET_MODE (var));
- new_insn = copy_rtx (temp);
- temp6 = single_set (new_insn);
- SET_DEST (temp6) = bval;
- emit_insn (PATTERN (new_insn));
- }
-
- target = emit_conditional_move (var, code,
- cond0, cond1, VOIDmode,
- aval, bval, GET_MODE (var),
- (code == LTU || code == GEU
- || code == LEU || code == GTU));
-
- if (target)
- {
- rtx seq1, seq2, last;
- int copy_ok;
-
- /* Save the conditional move sequence but don't emit it
- yet. On some machines, like the alpha, it is possible
- that temp5 == insn, so next generate the sequence that
- saves the compared values and then emit both
- sequences ensuring seq1 occurs before seq2. */
- seq2 = get_insns ();
- end_sequence ();
-
- /* "Now that we can't fail..." Famous last words.
- Generate the copy insns that preserve the compared
- values. */
- start_sequence ();
- emit_move_insn (cond0, XEXP (temp4, 0));
- if (cond1 != XEXP (temp4, 1))
- emit_move_insn (cond1, XEXP (temp4, 1));
- seq1 = get_insns ();
- end_sequence ();
-
- /* Validate the sequence -- this may be some weird
- bit-extract-and-test instruction for which there
-		     exists no complementary bit-extract insn.  */
- copy_ok = 1;
- for (last = seq1; last ; last = NEXT_INSN (last))
- if (recog_memoized (last) < 0)
- {
- copy_ok = 0;
- break;
- }
-
- if (copy_ok)
- {
- emit_insns_before (seq1, temp5);
-
- /* Insert conditional move after insn, to be sure
- that the jump and a possible compare won't be
- separated. */
- last = emit_insns_after (seq2, insn);
-
- /* ??? We can also delete the insn that sets X to A.
- Flow will do it too though. */
- delete_insn (temp);
- next = NEXT_INSN (insn);
- delete_jump (insn);
-
- if (after_regscan)
- {
- reg_scan_update (seq1, NEXT_INSN (last),
- old_max_reg);
- old_max_reg = max_reg_num ();
- }
-
- changed = 1;
- continue;
- }
- }
- else
- end_sequence ();
- }
-#endif
-
- /* That didn't work, try a store-flag insn.
-
- We further divide the cases into:
-
- 1) x = a; if (...) x = b; and either A or B is zero,
- 2) if (...) x = 0; and jumps are expensive,
- 3) x = a; if (...) x = b; and A and B are constants where all
- the set bits in A are also set in B and jumps are expensive,
- 4) x = a; if (...) x = b; and A and B non-zero, and jumps are
- more expensive, and
- 5) if (...) x = b; if jumps are even more expensive. */
-
- if (GET_MODE_CLASS (GET_MODE (temp1)) == MODE_INT
- /* We will be passing this as operand into expand_and. No
- good if it's not valid as an operand. */
- && general_operand (temp2, GET_MODE (temp2))
- && ((GET_CODE (temp3) == CONST_INT)
- /* Make the latter case look like
- x = x; if (...) x = 0; */
- || (temp3 = temp1,
- ((BRANCH_COST >= 2
- && temp2 == const0_rtx)
- || BRANCH_COST >= 3)))
- /* If B is zero, OK; if A is zero, can only do (1) if we
- can reverse the condition. See if (3) applies possibly
- by reversing the condition. Prefer reversing to (4) when
- branches are very expensive. */
- && (((BRANCH_COST >= 2
- || STORE_FLAG_VALUE == -1
- || (STORE_FLAG_VALUE == 1
- /* Check that the mask is a power of two,
- so that it can probably be generated
- with a shift. */
- && GET_CODE (temp3) == CONST_INT
- && exact_log2 (INTVAL (temp3)) >= 0))
- && (reversep = 0, temp2 == const0_rtx))
- || ((BRANCH_COST >= 2
- || STORE_FLAG_VALUE == -1
- || (STORE_FLAG_VALUE == 1
- && GET_CODE (temp2) == CONST_INT
- && exact_log2 (INTVAL (temp2)) >= 0))
- && temp3 == const0_rtx
- && (reversep = can_reverse_comparison_p (temp4, insn)))
- || (BRANCH_COST >= 2
- && GET_CODE (temp2) == CONST_INT
- && GET_CODE (temp3) == CONST_INT
- && ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp2)
- || ((INTVAL (temp2) & INTVAL (temp3)) == INTVAL (temp3)
- && (reversep = can_reverse_comparison_p (temp4,
- insn)))))
- || BRANCH_COST >= 3)
- )
- {
- enum rtx_code code = GET_CODE (temp4);
- rtx uval, cval, var = temp1;
- int normalizep;
- rtx target;
-
- /* If necessary, reverse the condition. */
- if (reversep)
- code = reverse_condition (code), uval = temp2, cval = temp3;
- else
- uval = temp3, cval = temp2;
-
- /* If CVAL is non-zero, normalize to -1. Otherwise, if UVAL
- is the constant 1, it is best to just compute the result
- directly. If UVAL is constant and STORE_FLAG_VALUE
- includes all of its bits, it is best to compute the flag
- value unnormalized and `and' it with UVAL. Otherwise,
- normalize to -1 and `and' with UVAL. */
- normalizep = (cval != const0_rtx ? -1
- : (uval == const1_rtx ? 1
- : (GET_CODE (uval) == CONST_INT
- && (INTVAL (uval) & ~STORE_FLAG_VALUE) == 0)
- ? 0 : -1));
-
- /* We will be putting the store-flag insn immediately in
- front of the comparison that was originally being done,
- so we know all the variables in TEMP4 will be valid.
- However, this might be in front of the assignment of
- A to VAR. If it is, it would clobber the store-flag
- we will be emitting.
-
- Therefore, emit into a temporary which will be copied to
- VAR immediately after TEMP. */
-
- start_sequence ();
- target = emit_store_flag (gen_reg_rtx (GET_MODE (var)), code,
- XEXP (temp4, 0), XEXP (temp4, 1),
- VOIDmode,
- (code == LTU || code == LEU
- || code == GEU || code == GTU),
- normalizep);
- if (target)
- {
- rtx seq;
- rtx before = insn;
-
- seq = get_insns ();
- end_sequence ();
-
- /* Put the store-flag insns in front of the first insn
- used to compute the condition to ensure that we
- use the same values of them as the current
- comparison. However, the remainder of the insns we
- generate will be placed directly in front of the
- jump insn, in case any of the pseudos we use
- are modified earlier. */
-
- emit_insns_before (seq, temp5);
-
- start_sequence ();
-
- /* Both CVAL and UVAL are non-zero. */
- if (cval != const0_rtx && uval != const0_rtx)
- {
- rtx tem1, tem2;
-
- tem1 = expand_and (uval, target, NULL_RTX);
- if (GET_CODE (cval) == CONST_INT
- && GET_CODE (uval) == CONST_INT
- && (INTVAL (cval) & INTVAL (uval)) == INTVAL (cval))
- tem2 = cval;
- else
- {
- tem2 = expand_unop (GET_MODE (var), one_cmpl_optab,
- target, NULL_RTX, 0);
- tem2 = expand_and (cval, tem2,
- (GET_CODE (tem2) == REG
- ? tem2 : 0));
- }
-
- /* If we usually make new pseudos, do so here. This
- turns out to help machines that have conditional
- move insns. */
- /* ??? Conditional moves have already been handled.
- This may be obsolete. */
-
- if (flag_expensive_optimizations)
- target = 0;
-
- target = expand_binop (GET_MODE (var), ior_optab,
- tem1, tem2, target,
- 1, OPTAB_WIDEN);
- }
- else if (normalizep != 1)
- {
- /* We know that either CVAL or UVAL is zero. If
- UVAL is zero, negate TARGET and `and' with CVAL.
- Otherwise, `and' with UVAL. */
- if (uval == const0_rtx)
- {
- target = expand_unop (GET_MODE (var), one_cmpl_optab,
- target, NULL_RTX, 0);
- uval = cval;
- }
-
- target = expand_and (uval, target,
- (GET_CODE (target) == REG
- && ! preserve_subexpressions_p ()
- ? target : NULL_RTX));
- }
-
- emit_move_insn (var, target);
- seq = get_insns ();
- end_sequence ();
-#ifdef HAVE_cc0
- /* If INSN uses CC0, we must not separate it from the
- insn that sets cc0. */
- if (reg_mentioned_p (cc0_rtx, PATTERN (before)))
- before = prev_nonnote_insn (before);
-#endif
- emit_insns_before (seq, before);
-
- delete_insn (temp);
- next = NEXT_INSN (insn);
- delete_jump (insn);
-
- if (after_regscan)
- {
- reg_scan_update (seq, NEXT_INSN (next), old_max_reg);
- old_max_reg = max_reg_num ();
- }
-
- changed = 1;
- continue;
- }
- else
- end_sequence ();
- }
- }
-
-
- /* Simplify if (...) x = 1; else {...} if (x) ...
- We recognize this case scanning backwards as well.
-
- TEMP is the assignment to x;
- TEMP1 is the label at the head of the second if. */
- /* ?? This should call get_condition to find the values being
- compared, instead of looking for a COMPARE insn when HAVE_cc0
- is not defined. This would allow it to work on the m88k. */
- /* ?? This optimization is only safe before cse is run if HAVE_cc0
- is not defined and the condition is tested by a separate compare
- insn. This is because the code below assumes that the result
- of the compare dies in the following branch.
-
- Not only that, but there might be other insns between the
- compare and branch whose results are live. Those insns need
- to be executed.
-
- A way to fix this is to move the insns at JUMP_LABEL (insn)
- to before INSN. If we are running before flow, they will
- be deleted if they aren't needed. But this doesn't work
- well after flow.
-
- This is really a special-case of jump threading, anyway. The
- right thing to do is to replace this and jump threading with
- much simpler code in cse.
-
- This code has been turned off in the non-cc0 case in the
- meantime. */
-
-#ifdef HAVE_cc0
- else if (this_is_simplejump
- /* Safe to skip USE and CLOBBER insns here
- since they will not be deleted. */
- && (temp = prev_active_insn (insn))
- && no_labels_between_p (temp, insn)
- && GET_CODE (temp) == INSN
- && GET_CODE (PATTERN (temp)) == SET
- && GET_CODE (SET_DEST (PATTERN (temp))) == REG
- && CONSTANT_P (SET_SRC (PATTERN (temp)))
- && (temp1 = next_active_insn (JUMP_LABEL (insn)))
- /* If we find that the next value tested is `x'
- (TEMP1 is the insn where this happens), win. */
- && GET_CODE (temp1) == INSN
- && GET_CODE (PATTERN (temp1)) == SET
-#ifdef HAVE_cc0
- /* Does temp1 `tst' the value of x? */
- && SET_SRC (PATTERN (temp1)) == SET_DEST (PATTERN (temp))
- && SET_DEST (PATTERN (temp1)) == cc0_rtx
- && (temp1 = next_nonnote_insn (temp1))
-#else
- /* Does temp1 compare the value of x against zero? */
- && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
- && XEXP (SET_SRC (PATTERN (temp1)), 1) == const0_rtx
- && (XEXP (SET_SRC (PATTERN (temp1)), 0)
- == SET_DEST (PATTERN (temp)))
- && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
- && (temp1 = find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
-#endif
- && condjump_p (temp1))
- {
- /* Get the if_then_else from the condjump. */
- rtx choice = SET_SRC (PATTERN (temp1));
- if (GET_CODE (choice) == IF_THEN_ELSE)
- {
- enum rtx_code code = GET_CODE (XEXP (choice, 0));
- rtx val = SET_SRC (PATTERN (temp));
- rtx cond
- = simplify_relational_operation (code, GET_MODE (SET_DEST (PATTERN (temp))),
- val, const0_rtx);
- rtx ultimate;
-
- if (cond == const_true_rtx)
- ultimate = XEXP (choice, 1);
- else if (cond == const0_rtx)
- ultimate = XEXP (choice, 2);
- else
- ultimate = 0;
-
- if (ultimate == pc_rtx)
- ultimate = get_label_after (temp1);
- else if (ultimate && GET_CODE (ultimate) != RETURN)
- ultimate = XEXP (ultimate, 0);
-
- if (ultimate && JUMP_LABEL(insn) != ultimate)
- changed |= redirect_jump (insn, ultimate);
- }
- }
-#endif
-
-#if 0
- /* @@ This needs a bit of work before it will be right.
-
- Any type of comparison can be accepted for the first and
- second compare. When rewriting the first jump, we must
-     compute what conditions can reach label3, and use the
- appropriate code. We can not simply reverse/swap the code
- of the first jump. In some cases, the second jump must be
- rewritten also.
-
- For example,
- < == converts to > ==
- < != converts to == >
- etc.
-
- If the code is written to only accept an '==' test for the second
- compare, then all that needs to be done is to swap the condition
- of the first branch.
-
- It is questionable whether we want this optimization anyways,
- since if the user wrote code like this because he/she knew that
- the jump to label1 is taken most of the time, then rewriting
- this gives slower code. */
- /* @@ This should call get_condition to find the values being
- compared, instead of looking for a COMPARE insn when HAVE_cc0
- is not defined. This would allow it to work on the m88k. */
- /* @@ This optimization is only safe before cse is run if HAVE_cc0
- is not defined and the condition is tested by a separate compare
- insn. This is because the code below assumes that the result
- of the compare dies in the following branch. */
-
- /* Simplify test a ~= b
- condjump label1;
- test a == b
- condjump label2;
- jump label3;
- label1:
-
- rewriting as
- test a ~~= b
- condjump label3
- test a == b
- condjump label2
- label1:
-
- where ~= is an inequality, e.g. >, and ~~= is the swapped
- inequality, e.g. <.
-
- We recognize this case scanning backwards.
-
- TEMP is the conditional jump to `label2';
- TEMP1 is the test for `a == b';
- TEMP2 is the conditional jump to `label1';
- TEMP3 is the test for `a ~= b'. */
- else if (this_is_simplejump
- && (temp = prev_active_insn (insn))
- && no_labels_between_p (temp, insn)
- && condjump_p (temp)
- && (temp1 = prev_active_insn (temp))
- && no_labels_between_p (temp1, temp)
- && GET_CODE (temp1) == INSN
- && GET_CODE (PATTERN (temp1)) == SET
-#ifdef HAVE_cc0
- && sets_cc0_p (PATTERN (temp1)) == 1
-#else
- && GET_CODE (SET_SRC (PATTERN (temp1))) == COMPARE
- && GET_CODE (SET_DEST (PATTERN (temp1))) == REG
- && (temp == find_next_ref (SET_DEST (PATTERN (temp1)), temp1))
-#endif
- && (temp2 = prev_active_insn (temp1))
- && no_labels_between_p (temp2, temp1)
- && condjump_p (temp2)
- && JUMP_LABEL (temp2) == next_nonnote_insn (NEXT_INSN (insn))
- && (temp3 = prev_active_insn (temp2))
- && no_labels_between_p (temp3, temp2)
- && GET_CODE (PATTERN (temp3)) == SET
- && rtx_equal_p (SET_DEST (PATTERN (temp3)),
- SET_DEST (PATTERN (temp1)))
- && rtx_equal_p (SET_SRC (PATTERN (temp1)),
- SET_SRC (PATTERN (temp3)))
- && ! inequality_comparisons_p (PATTERN (temp))
- && inequality_comparisons_p (PATTERN (temp2)))
- {
- rtx fallthrough_label = JUMP_LABEL (temp2);
-
- ++LABEL_NUSES (fallthrough_label);
- if (swap_jump (temp2, JUMP_LABEL (insn)))
- {
- delete_insn (insn);
- changed = 1;
- }
-
- if (--LABEL_NUSES (fallthrough_label) == 0)
- delete_insn (fallthrough_label);
- }
-#endif
- /* Simplify if (...) {... x = 1;} if (x) ...
-
- We recognize this case backwards.
-
- TEMP is the test of `x';
- TEMP1 is the assignment to `x' at the end of the
- previous statement. */
- /* @@ This should call get_condition to find the values being
- compared, instead of looking for a COMPARE insn when HAVE_cc0
- is not defined. This would allow it to work on the m88k. */
- /* @@ This optimization is only safe before cse is run if HAVE_cc0
- is not defined and the condition is tested by a separate compare
- insn. This is because the code below assumes that the result
- of the compare dies in the following branch. */
-
- /* ??? This has to be turned off. The problem is that the
- unconditional jump might indirectly end up branching to the
- label between TEMP1 and TEMP. We can't detect this, in general,
- since it may become a jump to there after further optimizations.
- If that jump is done, it will be deleted, so we will retry
- this optimization in the next pass, thus an infinite loop.
-
- The present code prevents this by putting the jump after the
- label, but this is not logically correct. */
-#if 0
- else if (this_is_condjump
- /* Safe to skip USE and CLOBBER insns here
- since they will not be deleted. */
- && (temp = prev_active_insn (insn))
- && no_labels_between_p (temp, insn)
- && GET_CODE (temp) == INSN
- && GET_CODE (PATTERN (temp)) == SET
-#ifdef HAVE_cc0
- && sets_cc0_p (PATTERN (temp)) == 1
- && GET_CODE (SET_SRC (PATTERN (temp))) == REG
-#else
- /* Temp must be a compare insn, we can not accept a register
- to register move here, since it may not be simply a
- tst insn. */
- && GET_CODE (SET_SRC (PATTERN (temp))) == COMPARE
- && XEXP (SET_SRC (PATTERN (temp)), 1) == const0_rtx
- && GET_CODE (XEXP (SET_SRC (PATTERN (temp)), 0)) == REG
- && GET_CODE (SET_DEST (PATTERN (temp))) == REG
- && insn == find_next_ref (SET_DEST (PATTERN (temp)), temp)
-#endif
- /* May skip USE or CLOBBER insns here
- for checking for opportunity, since we
- take care of them later. */
- && (temp1 = prev_active_insn (temp))
- && GET_CODE (temp1) == INSN
- && GET_CODE (PATTERN (temp1)) == SET
-#ifdef HAVE_cc0
- && SET_SRC (PATTERN (temp)) == SET_DEST (PATTERN (temp1))
-#else
- && (XEXP (SET_SRC (PATTERN (temp)), 0)
- == SET_DEST (PATTERN (temp1)))
-#endif
- && CONSTANT_P (SET_SRC (PATTERN (temp1)))
- /* If this isn't true, cse will do the job. */
- && ! no_labels_between_p (temp1, temp))
- {
- /* Get the if_then_else from the condjump. */
- rtx choice = SET_SRC (PATTERN (insn));
- if (GET_CODE (choice) == IF_THEN_ELSE
- && (GET_CODE (XEXP (choice, 0)) == EQ
- || GET_CODE (XEXP (choice, 0)) == NE))
- {
- int want_nonzero = (GET_CODE (XEXP (choice, 0)) == NE);
- rtx last_insn;
- rtx ultimate;
- rtx p;
-
- /* Get the place that condjump will jump to
- if it is reached from here. */
- if ((SET_SRC (PATTERN (temp1)) != const0_rtx)
- == want_nonzero)
- ultimate = XEXP (choice, 1);
- else
- ultimate = XEXP (choice, 2);
- /* Get it as a CODE_LABEL. */
- if (ultimate == pc_rtx)
- ultimate = get_label_after (insn);
- else
- /* Get the label out of the LABEL_REF. */
- ultimate = XEXP (ultimate, 0);
-
- /* Insert the jump immediately before TEMP, specifically
- after the label that is between TEMP1 and TEMP. */
- last_insn = PREV_INSN (temp);
-
- /* If we would be branching to the next insn, the jump
-		 would immediately be deleted and then re-inserted in
- a subsequent pass over the code. So don't do anything
- in that case. */
- if (next_active_insn (last_insn)
- != next_active_insn (ultimate))
- {
- emit_barrier_after (last_insn);
- p = emit_jump_insn_after (gen_jump (ultimate),
- last_insn);
- JUMP_LABEL (p) = ultimate;
- ++LABEL_NUSES (ultimate);
- if (INSN_UID (ultimate) < max_jump_chain
- && INSN_CODE (p) < max_jump_chain)
- {
- jump_chain[INSN_UID (p)]
- = jump_chain[INSN_UID (ultimate)];
- jump_chain[INSN_UID (ultimate)] = p;
- }
- changed = 1;
- continue;
- }
- }
- }
-#endif
#ifdef HAVE_trap
/* Detect a conditional jump jumping over an unconditional trap. */
- else if (HAVE_trap
- && this_is_condjump && ! this_is_simplejump
- && reallabelprev != 0
- && GET_CODE (reallabelprev) == INSN
- && GET_CODE (PATTERN (reallabelprev)) == TRAP_IF
- && TRAP_CONDITION (PATTERN (reallabelprev)) == const_true_rtx
- && prev_active_insn (reallabelprev) == insn
- && no_labels_between_p (insn, reallabelprev)
- && (temp2 = get_condition (insn, &temp4))
- && can_reverse_comparison_p (temp2, insn))
+ if (HAVE_trap
+ && this_is_condjump && ! this_is_simplejump
+ && reallabelprev != 0
+ && GET_CODE (reallabelprev) == INSN
+ && GET_CODE (PATTERN (reallabelprev)) == TRAP_IF
+ && TRAP_CONDITION (PATTERN (reallabelprev)) == const_true_rtx
+ && prev_active_insn (reallabelprev) == insn
+ && no_labels_between_p (insn, reallabelprev)
+ && (temp2 = get_condition (insn, &temp4))
+ && can_reverse_comparison_p (temp2, insn))
{
rtx new = gen_cond_trap (reverse_condition (GET_CODE (temp2)),
XEXP (temp2, 0), XEXP (temp2, 1),
@@ -5454,47 +4033,3 @@ rtx_equal_for_thread_p (x, y, yinsn)
}
return 1;
}
-
-
-#if !defined(HAVE_cc0) && !defined(HAVE_conditional_arithmetic)
-/* Return the insn that NEW can be safely inserted in front of starting at
- the jump insn INSN. Return 0 if it is not safe to do this jump
- optimization. Note that NEW must contain a single set. */
-
-static rtx
-find_insert_position (insn, new)
- rtx insn;
- rtx new;
-{
- int i;
- rtx prev;
-
- /* If NEW does not clobber, it is safe to insert NEW before INSN. */
- if (GET_CODE (PATTERN (new)) != PARALLEL)
- return insn;
-
- for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
- if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
- && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
- insn))
- break;
-
- if (i < 0)
- return insn;
-
- /* There is a good chance that the previous insn PREV sets the thing
- being clobbered (often the CC in a hard reg). If PREV does not
- use what NEW sets, we can insert NEW before PREV. */
-
- prev = prev_active_insn (insn);
- for (i = XVECLEN (PATTERN (new), 0) - 1; i >= 0; i--)
- if (GET_CODE (XVECEXP (PATTERN (new), 0, i)) == CLOBBER
- && reg_overlap_mentioned_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
- insn)
- && ! modified_in_p (XEXP (XVECEXP (PATTERN (new), 0, i), 0),
- prev))
- return 0;
-
- return reg_mentioned_p (SET_DEST (single_set (new)), prev) ? 0 : prev;
-}
-#endif /* !HAVE_cc0 */
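
Among the transformations deleted from jump.c above is the store-flag rewrite its comments describe, turning "if (foo) bar++;" into "bar += (foo != 0);". A small self-contained C sketch of that source-level equivalence, using toy functions for illustration rather than GCC code:

#include <assert.h>

/* Branch around the increment, as the user wrote it.  */
static int
with_branch (int foo, int bar)
{
  if (foo)
    bar++;
  return bar;
}

/* Store-flag form: add the value of the comparison directly.  */
static int
with_store_flag (int foo, int bar)
{
  return bar + (foo != 0);
}

int
main (void)
{
  assert (with_branch (0, 5) == with_store_flag (0, 5));
  assert (with_branch (9, 5) == with_store_flag (9, 5));
  return 0;
}

Rewrites of this family are now left to the new if-conversion pass rather than being pattern-matched insn by insn in jump_optimize_1.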
diff --git a/gcc/timevar.def b/gcc/timevar.def
index 865d074..401719a 100644
--- a/gcc/timevar.def
+++ b/gcc/timevar.def
@@ -51,12 +51,14 @@ DEFTIMEVAR (TV_CSE2 , "CSE 2")
DEFTIMEVAR (TV_BRANCH_PROB , "branch prediction")
DEFTIMEVAR (TV_FLOW , "flow analysis")
DEFTIMEVAR (TV_COMBINE , "combiner")
+DEFTIMEVAR (TV_IFCVT , "if-conversion")
DEFTIMEVAR (TV_REGMOVE , "regmove")
DEFTIMEVAR (TV_SCHED , "scheduling")
DEFTIMEVAR (TV_LOCAL_ALLOC , "local alloc")
DEFTIMEVAR (TV_GLOBAL_ALLOC , "global alloc")
DEFTIMEVAR (TV_RELOAD_CSE_REGS , "reload CSE regs")
DEFTIMEVAR (TV_FLOW2 , "flow 2")
+DEFTIMEVAR (TV_IFCVT2 , "if-conversion 2")
DEFTIMEVAR (TV_PEEPHOLE2 , "peephole 2")
DEFTIMEVAR (TV_SCHED2 , "schedulding 2")
DEFTIMEVAR (TV_DBR_SCHED , "delay branch sched")
diff --git a/gcc/toplev.c b/gcc/toplev.c
index 1f03e3f..0d45f69 100644
--- a/gcc/toplev.c
+++ b/gcc/toplev.c
@@ -264,11 +264,13 @@ enum dump_file_index
DFI_bp,
DFI_flow,
DFI_combine,
+ DFI_ce,
DFI_regmove,
DFI_sched,
DFI_lreg,
DFI_greg,
DFI_flow2,
+ DFI_ce2,
DFI_peephole2,
DFI_sched2,
DFI_bbro,
@@ -281,7 +283,13 @@ enum dump_file_index
};
/* Describes all the dump files. Should be kept in order of the
- pass and in sync with dump_file_index above. */
+ pass and in sync with dump_file_index above.
+
+ Remaining -d letters:
+
+ " h o q u "
+ " H K OPQ TUVWXYZ"
+*/
struct dump_file_info dump_file[DFI_MAX] =
{
@@ -298,11 +306,13 @@ struct dump_file_info dump_file[DFI_MAX] =
{ "bp", 'b', 1, 0, 0 },
{ "flow", 'f', 1, 0, 0 },
{ "combine", 'c', 1, 0, 0 },
+ { "ce", 'C', 1, 0, 0 },
{ "regmove", 'N', 1, 0, 0 },
{ "sched", 'S', 1, 0, 0 },
{ "lreg", 'l', 1, 0, 0 },
{ "greg", 'g', 1, 0, 0 },
{ "flow2", 'w', 1, 0, 0 },
+ { "ce2", 'E', 1, 0, 0 },
{ "peephole2", 'z', 1, 0, 0 },
{ "sched2", 'R', 1, 0, 0 },
{ "bbro", 'B', 1, 0, 0 },
@@ -2814,12 +2824,23 @@ rest_of_compilation (decl)
}
timevar_push (TV_JUMP);
- /* Try to identify useless null pointer tests and delete them. */
- if (flag_delete_null_pointer_checks)
+
+ if (optimize > 0)
{
find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
cleanup_cfg (insns);
- delete_null_pointer_checks (insns);
+
+ /* ??? Run if-conversion before delete_null_pointer_checks,
+	 since the latter does not preserve the CFG.  This should
+	 be changed -- no sense converting if's that are going to
+ be deleted. */
+ timevar_push (TV_IFCVT);
+ if_convert (0);
+ timevar_pop (TV_IFCVT);
+
+ /* Try to identify useless null pointer tests and delete them. */
+ if (flag_delete_null_pointer_checks)
+ delete_null_pointer_checks (insns);
}
/* Jump optimization, and the removal of NULL pointer checks, may
@@ -2998,11 +3019,6 @@ rest_of_compilation (decl)
ggc_collect ();
}
- /* ??? Well, nearly. If HAVE_conditional_arithmetic, jump_optimize
- has put off all if-conversion until "after CSE". If we put this
- off any longer we may miss out doing if-conversion entirely. */
- cse_not_expected = 1;
-
if (optimize > 0)
{
timevar_push (TV_CSE2);
@@ -3016,9 +3032,19 @@ rest_of_compilation (decl)
max_reg_num so we must rerun reg_scan afterwards.
??? Rework to not call reg_scan so often. */
timevar_push (TV_JUMP);
+
reg_scan (insns, max_reg_num (), 0);
jump_optimize (insns, !JUMP_CROSS_JUMP,
!JUMP_NOOP_MOVES, JUMP_AFTER_REGSCAN);
+
+ timevar_push (TV_IFCVT);
+
+ find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
+ cleanup_cfg (insns);
+ if_convert (0);
+
+      timevar_pop (TV_IFCVT);
+
timevar_pop (TV_JUMP);
reg_scan (insns, max_reg_num (), 0);
@@ -3050,6 +3076,8 @@ rest_of_compilation (decl)
ggc_collect ();
}
+ cse_not_expected = 1;
+
if (profile_arc_flag || flag_test_coverage || flag_branch_probabilities)
{
timevar_push (TV_BRANCH_PROB);
@@ -3110,10 +3138,6 @@ rest_of_compilation (decl)
if (ggc_p)
ggc_collect ();
- /* The first life analysis pass has finished. From now on we can not
- generate any new pseudos. */
- no_new_pseudos = 1;
-
/* If -opt, try combining insns through substitution. */
if (optimize > 0)
@@ -3134,6 +3158,20 @@ rest_of_compilation (decl)
timevar_push (TV_JUMP);
rebuild_jump_labels (insns);
timevar_pop (TV_JUMP);
+
+ timevar_push (TV_FLOW);
+ find_basic_blocks (insns, max_reg_num (), rtl_dump_file);
+ cleanup_cfg (insns);
+
+ /* Blimey. We've got to have the CFG up to date for the call to
+ if_convert below. However, the random deletion of blocks
+	 without updating life info can wind up with Weird Stuff in
+	 global_live_at_end.  We then run sched1, which updates things
+	 properly, discovers the weirdness and aborts.  */
+ update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES,
+ PROP_DEATH_NOTES);
+
+ timevar_pop (TV_FLOW);
}
close_dump_file (DFI_combine, print_rtl_with_bb, insns);
@@ -3143,6 +3181,19 @@ rest_of_compilation (decl)
ggc_collect ();
}
+ /* Rerun if-conversion, as combine may have simplified things enough to
+ now meet sequence length restrictions. */
+ if (optimize > 0)
+ {
+ timevar_push (TV_IFCVT);
+ open_dump_file (DFI_ce, decl);
+
+ if_convert (1);
+
+ close_dump_file (DFI_ce, print_rtl_with_bb, insns);
+ timevar_pop (TV_IFCVT);
+ }
+
/* Register allocation pre-pass, to reduce number of moves
necessary for two-address machines. */
if (optimize > 0 && (flag_regmove || flag_expensive_optimizations))
@@ -3185,6 +3236,10 @@ rest_of_compilation (decl)
if (ggc_p)
ggc_collect ();
+
+ /* Register lifetime information is up to date. From now on
+ we can not generate any new pseudos. */
+ no_new_pseudos = 1;
}
#endif
@@ -3204,7 +3259,13 @@ rest_of_compilation (decl)
/* We recomputed reg usage as part of updating the rest
of life info during sched. */
if (! flag_schedule_insns)
- recompute_reg_usage (insns, ! optimize_size);
+ {
+ recompute_reg_usage (insns, ! optimize_size);
+
+ /* Register lifetime information is up to date. From now on
+ we can not generate any new pseudos. */
+ no_new_pseudos = 1;
+ }
regclass (insns, max_reg_num (), rtl_dump_file);
rebuild_label_notes_after_reload = local_alloc ();
@@ -3317,12 +3378,23 @@ rest_of_compilation (decl)
close_dump_file (DFI_flow2, print_rtl_with_bb, insns);
timevar_pop (TV_FLOW2);
+ if (optimize > 0)
+ {
+ timevar_push (TV_IFCVT2);
+ open_dump_file (DFI_ce2, decl);
+
+ if_convert (1);
+
+ close_dump_file (DFI_ce2, print_rtl_with_bb, insns);
+ timevar_pop (TV_IFCVT2);
+ }
+
#ifdef HAVE_peephole2
if (optimize > 0 && flag_peephole2)
{
timevar_push (TV_PEEPHOLE2);
-
open_dump_file (DFI_peephole2, decl);
+
peephole2_optimize (rtl_dump_file);
close_dump_file (DFI_peephole2, print_rtl_with_bb, insns);
@@ -3387,6 +3459,8 @@ rest_of_compilation (decl)
jump_optimize (insns, JUMP_CROSS_JUMP, JUMP_NOOP_MOVES,
!JUMP_AFTER_REGSCAN);
+ /* CFG no longer kept up to date. */
+
close_dump_file (DFI_jump2, print_rtl_with_bb, insns);
timevar_pop (TV_JUMP);
}
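
Taken together, the toplev.c hunks above give the new pass four call sites in rest_of_compilation. A condensed sketch of the resulting ordering, offered as a summary of the hunks rather than code from the patch:

/* Approximate pass order after this change:

     jump_optimize, find_basic_blocks, cleanup_cfg
     if_convert (0)         -- new; runs before delete_null_pointer_checks
     cse2, jump_optimize, cleanup_cfg
     if_convert (0)         -- new
     cse_not_expected = 1   -- now set after cse2 rather than before it
     flow, combine, cleanup_cfg, update_life_info
     if_convert (1)         -- new; dumped as "ce"  (-d letter 'C')
     regmove, scheduling, register allocation, flow2
     if_convert (1)         -- new; dumped as "ce2" (-d letter 'E')
     peephole2, ...  */

The if_convert argument (life_data_ok) is 0 for the early calls, made before life analysis has run, and 1 for the later ones so that the pass rebuilds the life information it may invalidate, as the update_life_info call in if_convert above shows.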