Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog |  12
-rw-r--r--  gcc/expmed.c  | 251
-rw-r--r--  gcc/expr.c    |  90
3 files changed, 226 insertions, 127 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 3a0cae3..96bd8cf 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,17 @@
2009-06-28 Paolo Bonzini <bonzini@gnu.org>
+ * expr.c (expand_expr_real_1): Just use do_store_flag.
+ (do_store_flag): Drop support for TRUTH_NOT_EXPR. Use
+ emit_store_flag_force.
+ * expmed.c (emit_store_flag_force): Copy here the trick
+ previously in expand_expr_real_1. Try reversing the comparison.
+ (emit_store_flag_1): Work if target is NULL.
+ (emit_store_flag): Work if target is NULL, using the result mode
+ from the comparison. Use split_comparison, restructure final part
+ to simplify conditionals.
+
+2009-06-28 Paolo Bonzini <bonzini@gnu.org>
+
* builtins.c (expand_errno_check): Use do_compare_rtx_and_jump.
* dojump.c (do_jump): Change handling of floating-point
ops to use just do_compare_and_jump.
diff --git a/gcc/expmed.c b/gcc/expmed.c
index a579c7c..9c76a6d 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -5112,14 +5112,18 @@ expand_and (enum machine_mode mode, rtx op0, rtx op1, rtx target)
emit_move_insn (target, tem);
return target;
}
-
+
/* Helper function for emit_store_flag. */
static rtx
emit_store_flag_1 (rtx target, rtx subtarget, enum machine_mode mode,
int normalizep)
{
rtx op0;
- enum machine_mode target_mode = GET_MODE (target);
+ enum machine_mode target_mode;
+
+ if (!target)
+ target = gen_reg_rtx (GET_MODE (subtarget));
+ target_mode = GET_MODE (target);
/* If we are converting to a wider mode, first convert to
TARGET_MODE, then normalize. This produces better combining
@@ -5206,9 +5210,10 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
rtx subtarget;
enum insn_code icode;
enum machine_mode compare_mode;
- enum machine_mode target_mode = GET_MODE (target);
+ enum machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
enum mode_class mclass;
- rtx tem;
+ enum rtx_code rcode;
+ rtx tem, trueval;
rtx last;
rtx pattern, comparison;
@@ -5312,10 +5317,13 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
{
subtarget = target;
+ if (!target)
+ target_mode = mode;
+
/* If the result is to be wider than OP0, it is best to convert it
first. If it is to be narrower, it is *incorrect* to convert it
first. */
- if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
+ else if (GET_MODE_SIZE (target_mode) > GET_MODE_SIZE (mode))
{
op0 = convert_modes (target_mode, mode, op0, 0);
mode = target_mode;
@@ -5372,10 +5380,12 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
continue;
}
- subtarget = target;
- if (optimize || !(insn_data[(int) icode].operand[0].predicate
- (subtarget, result_mode)))
+ if (!target
+ || optimize
+ || !(insn_data[(int) icode].operand[0].predicate (target, result_mode)))
subtarget = gen_reg_rtx (result_mode);
+ else
+ subtarget = target;
pattern = GEN_FCN (icode) (subtarget, comparison, x, y);
@@ -5393,23 +5403,118 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
last = get_last_insn ();
+ /* If we reached here, we can't do this with a scc insn. However, there
+ are some comparisons that can be done in other ways. Don't do any
+ of these cases if branches are very cheap. */
+ if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
+ return 0;
+
+ /* See what we need to return. We can only return a 1, -1, or the
+ sign bit. */
+
+ if (normalizep == 0)
+ {
+ if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
+ normalizep = STORE_FLAG_VALUE;
+
+ else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
+ == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
+ ;
+ else
+ return 0;
+ }
+
/* If optimizing, use different pseudo registers for each insn, instead
of reusing the same pseudo. This leads to better CSE, but slows
down the compiler, since there are more pseudos */
subtarget = (!optimize
&& (target_mode == mode)) ? target : NULL_RTX;
+ trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);
+
+ /* For floating-point comparisons, try the reverse comparison or try
+ changing the "orderedness" of the comparison. */
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ enum rtx_code first_code;
+ bool and_them;
+
+ rcode = reverse_condition_maybe_unordered (code);
+ if (can_compare_p (rcode, mode, ccp_store_flag)
+ && (code == ORDERED || code == UNORDERED
+ || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
+ || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
+ {
+ /* For the reverse comparison, use either an addition or a XOR. */
+ if ((STORE_FLAG_VALUE == 1 && normalizep == -1)
+ || (STORE_FLAG_VALUE == -1 && normalizep == 1))
+ {
+ tem = emit_store_flag (subtarget, rcode, op0, op1, mode, 0,
+ STORE_FLAG_VALUE);
+ if (tem)
+ return expand_binop (target_mode, add_optab, tem,
+ GEN_INT (normalizep),
+ target, 0, OPTAB_WIDEN);
+ }
+ else
+ {
+ tem = emit_store_flag (subtarget, rcode, op0, op1, mode, 0,
+ normalizep);
+ if (tem)
+ return expand_binop (target_mode, xor_optab, tem, trueval,
+ target, INTVAL (trueval) >= 0, OPTAB_WIDEN);
+ }
+ }
+
+ delete_insns_since (last);
+
+ /* Cannot split ORDERED and UNORDERED, only try the above trick. */
+ if (code == ORDERED || code == UNORDERED)
+ return 0;
+
+ and_them = split_comparison (code, mode, &first_code, &code);
+
+ /* If there are no NaNs, the first comparison should always fall through.
+ Effectively change the comparison to the other one. */
+ if (!HONOR_NANS (mode))
+ {
+ gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
+ return emit_store_flag (target, code, op0, op1, mode, 0, normalizep);
+ }
+
+#ifdef HAVE_conditional_move
+ /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
+ conditional move. */
+ tem = emit_store_flag (subtarget, first_code, op0, op1, mode, 0, normalizep);
+ if (tem == 0)
+ return 0;
+
+ if (and_them)
+ tem = emit_conditional_move (target, code, op0, op1, mode,
+ tem, const0_rtx, GET_MODE (tem), 0);
+ else
+ tem = emit_conditional_move (target, code, op0, op1, mode,
+ trueval, tem, GET_MODE (tem), 0);
+
+ if (tem == 0)
+ delete_insns_since (last);
+ return tem;
+#else
+ return 0;
+#endif
+ }
- /* If we reached here, we can't do this with a scc insn. However, there
- are some comparisons that can be done directly. For example, if
- this is an equality comparison of integers, we can try to exclusive-or
+ /* The remaining tricks only apply to integer comparisons. */
+
+ if (GET_MODE_CLASS (mode) != MODE_INT)
+ return 0;
+
+ /* If this is an equality comparison of integers, we can try to exclusive-or
(or subtract) the two operands and use a recursive call to try the
comparison with zero. Don't do any of these cases if branches are
very cheap. */
- if (BRANCH_COST (optimize_insn_for_speed_p (),
- false) > 0
- && GET_MODE_CLASS (mode) == MODE_INT && (code == EQ || code == NE)
- && op1 != const0_rtx)
+ if ((code == EQ || code == NE) && op1 != const0_rtx)
{
tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
OPTAB_WIDEN);
@@ -5420,9 +5525,44 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
if (tem != 0)
tem = emit_store_flag (target, code, tem, const0_rtx,
mode, unsignedp, normalizep);
- if (tem == 0)
- delete_insns_since (last);
- return tem;
+ if (tem != 0)
+ return tem;
+
+ delete_insns_since (last);
+ }
+
+ /* For integer comparisons, try the reverse comparison. However, for
+ small X, if we would have to extend it anyway, implementing "X != 0"
+ as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0". */
+ rcode = reverse_condition (code);
+ if (can_compare_p (rcode, mode, ccp_store_flag)
+ && ! (optab_handler (cstore_optab, mode)->insn_code == CODE_FOR_nothing
+ && code == NE
+ && GET_MODE_SIZE (mode) < UNITS_PER_WORD
+ && op1 == const0_rtx))
+ {
+ /* Again, for the reverse comparison, use either an addition or a XOR. */
+ if ((STORE_FLAG_VALUE == 1 && normalizep == -1)
+ || (STORE_FLAG_VALUE == -1 && normalizep == 1))
+ {
+ tem = emit_store_flag (subtarget, rcode, op0, op1, mode, 0,
+ STORE_FLAG_VALUE);
+ if (tem != 0)
+ tem = expand_binop (target_mode, add_optab, tem,
+ GEN_INT (normalizep), target, 0, OPTAB_WIDEN);
+ }
+ else
+ {
+ tem = emit_store_flag (subtarget, rcode, op0, op1, mode, 0,
+ normalizep);
+ if (tem != 0)
+ tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
+ INTVAL (trueval) >= 0, OPTAB_WIDEN);
+ }
+
+ if (tem != 0)
+ return tem;
+ delete_insns_since (last);
}
/* Some other cases we can do are EQ, NE, LE, and GT comparisons with
@@ -5430,30 +5570,12 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
do LE and GT if branches are expensive since they are expensive on
2-operand machines. */
- if (BRANCH_COST (optimize_insn_for_speed_p (),
- false) == 0
- || GET_MODE_CLASS (mode) != MODE_INT || op1 != const0_rtx
+ if (op1 != const0_rtx
|| (code != EQ && code != NE
&& (BRANCH_COST (optimize_insn_for_speed_p (),
false) <= 1 || (code != LE && code != GT))))
return 0;
- /* See what we need to return. We can only return a 1, -1, or the
- sign bit. */
-
- if (normalizep == 0)
- {
- if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
- normalizep = STORE_FLAG_VALUE;
-
- else if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
- == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1)))
- ;
- else
- return 0;
- }
-
/* Try to put the result of the comparison in the sign bit. Assume we can't
do the necessary operation below. */
@@ -5555,7 +5677,9 @@ emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
if (tem)
{
- if (GET_MODE (tem) != target_mode)
+ if (!target)
+ ;
+ else if (GET_MODE (tem) != target_mode)
{
convert_move (target, tem, 0);
tem = target;
@@ -5579,27 +5703,68 @@ emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
enum machine_mode mode, int unsignedp, int normalizep)
{
rtx tem, label;
+ rtx trueval, falseval;
/* First see if emit_store_flag can do the job. */
tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
if (tem != 0)
return tem;
- if (normalizep == 0)
- normalizep = 1;
+ if (!target)
+ target = gen_reg_rtx (word_mode);
- /* If this failed, we have to do this with set/compare/jump/set code. */
+ /* If this failed, we have to do this with set/compare/jump/set code.
+ For foo != 0, if foo is in OP0, just replace it with 1 if nonzero. */
+ trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
+ if (code == NE
+ && GET_MODE_CLASS (mode) == MODE_INT
+ && REG_P (target)
+ && op0 == target
+ && op1 == const0_rtx)
+ {
+ label = gen_label_rtx ();
+ do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp,
+ mode, NULL_RTX, NULL_RTX, label);
+ emit_move_insn (target, trueval);
+ emit_label (label);
+ return target;
+ }
if (!REG_P (target)
|| reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
target = gen_reg_rtx (GET_MODE (target));
- emit_move_insn (target, const1_rtx);
+ /* Jump in the right direction if the target cannot implement CODE
+ but can jump on its reverse condition. */
+ falseval = const0_rtx;
+ if (! can_compare_p (code, mode, ccp_jump)
+ && (! FLOAT_MODE_P (mode)
+ || code == ORDERED || code == UNORDERED
+ || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
+ || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
+ {
+ enum rtx_code rcode;
+ if (FLOAT_MODE_P (mode))
+ rcode = reverse_condition_maybe_unordered (code);
+ else
+ rcode = reverse_condition (code);
+
+ /* Canonicalize to UNORDERED for the libcall. */
+ if (can_compare_p (rcode, mode, ccp_jump)
+ || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
+ {
+ falseval = trueval;
+ trueval = const0_rtx;
+ code = rcode;
+ }
+ }
+
+ emit_move_insn (target, trueval);
label = gen_label_rtx ();
do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX,
NULL_RTX, label);
- emit_move_insn (target, const0_rtx);
+ emit_move_insn (target, falseval);
emit_label (label);
return target;
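
A standalone sketch (not part of the patch) of the "-(int)X >> 31" trick referenced in the emit_store_flag comments above: for an X known to be non-negative, e.g. zero-extended from a narrower mode, -X is negative exactly when X != 0, so an arithmetic right shift of -X by 31 yields "X != 0" already normalized to -1/0, which can be cheaper than computing "X == 0" and inverting it. The helper name ne_zero_m1 is made up for illustration.

#include <assert.h>
#include <stdint.h>

static int32_t
ne_zero_m1 (uint16_t x)
{
  /* ISO C leaves >> on negative values implementation-defined; GCC
     documents it as an arithmetic shift, which is what the generated
     RTL relies on.  */
  return -(int32_t) x >> 31;
}

int
main (void)
{
  assert (ne_zero_m1 (0) == 0);
  assert (ne_zero_m1 (1) == -1);
  assert (ne_zero_m1 (65535) == -1);
  return 0;
}
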
diff --git a/gcc/expr.c b/gcc/expr.c
index 4a8a4ca..d390b0a 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -9109,50 +9109,9 @@ expand_expr_real_1 (tree exp, rtx target, enum machine_mode tmode,
temp = do_store_flag (exp,
modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
tmode != VOIDmode ? tmode : mode);
- if (temp != 0)
- return temp;
-
- /* For foo != 0, load foo, and if it is nonzero load 1 instead. */
- if (code == NE_EXPR && integer_zerop (TREE_OPERAND (exp, 1))
- && original_target
- && REG_P (original_target)
- && (GET_MODE (original_target)
- == TYPE_MODE (TREE_TYPE (TREE_OPERAND (exp, 0)))))
- {
- temp = expand_expr (TREE_OPERAND (exp, 0), original_target,
- VOIDmode, EXPAND_NORMAL);
-
- /* If temp is constant, we can just compute the result. */
- if (CONST_INT_P (temp))
- {
- if (INTVAL (temp) != 0)
- emit_move_insn (target, const1_rtx);
- else
- emit_move_insn (target, const0_rtx);
-
- return target;
- }
-
- if (temp != original_target)
- {
- enum machine_mode mode1 = GET_MODE (temp);
- if (mode1 == VOIDmode)
- mode1 = tmode != VOIDmode ? tmode : mode;
-
- temp = copy_to_mode_reg (mode1, temp);
- }
-
- op1 = gen_label_rtx ();
- emit_cmp_and_jump_insns (temp, const0_rtx, EQ, NULL_RTX,
- GET_MODE (temp), unsignedp, op1);
- emit_move_insn (temp, const1_rtx);
- emit_label (op1);
- return temp;
- }
+ gcc_assert (temp);
+ return temp;
- /* If no set-flag instruction, must generate a conditional store
- into a temporary variable. Drop through and handle this
- like && and ||. */
/* Although TRUTH_{AND,OR}IF_EXPR aren't present in GIMPLE, they
are occasionally created by folding during expansion. */
case TRUTH_ANDIF_EXPR:
@@ -9751,8 +9710,7 @@ string_constant (tree arg, tree *ptr_offset)
}
/* Generate code to calculate EXP using a store-flag instruction
- and return an rtx for the result. EXP is either a comparison
- or a TRUTH_NOT_EXPR whose operand is a comparison.
+ and return an rtx for the result. EXP is a comparison.
If TARGET is nonzero, store the result there if convenient.
@@ -9774,19 +9732,9 @@ do_store_flag (tree exp, rtx target, enum machine_mode mode)
tree arg0, arg1, type;
tree tem;
enum machine_mode operand_mode;
- int invert = 0;
int unsignedp;
rtx op0, op1;
rtx subtarget = target;
- rtx result, label;
-
- /* If this is a TRUTH_NOT_EXPR, set a flag indicating we must invert the
- result at the end. We can't simply invert the test since it would
- have already been inverted if it were valid. This case occurs for
- some floating-point comparisons. */
-
- if (TREE_CODE (exp) == TRUTH_NOT_EXPR)
- invert = 1, exp = TREE_OPERAND (exp, 0);
arg0 = TREE_OPERAND (exp, 0);
arg1 = TREE_OPERAND (exp, 1);
@@ -9916,10 +9864,6 @@ do_store_flag (tree exp, rtx target, enum machine_mode mode)
target, VOIDmode, EXPAND_NORMAL);
}
- /* Now see if we are likely to be able to do this. Return if not. */
- if (! can_compare_p (code, operand_mode, ccp_store_flag))
- return 0;
-
if (! get_subtarget (target)
|| GET_MODE (subtarget) != operand_mode)
subtarget = 0;
@@ -9929,31 +9873,9 @@ do_store_flag (tree exp, rtx target, enum machine_mode mode)
if (target == 0)
target = gen_reg_rtx (mode);
- result = emit_store_flag (target, code, op0, op1,
- operand_mode, unsignedp, 1);
-
- if (result)
- {
- if (invert)
- result = expand_binop (mode, xor_optab, result, const1_rtx,
- result, 0, OPTAB_LIB_WIDEN);
- return result;
- }
-
- /* If this failed, we have to do this with set/compare/jump/set code. */
- if (!REG_P (target)
- || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
- target = gen_reg_rtx (GET_MODE (target));
-
- emit_move_insn (target, invert ? const0_rtx : const1_rtx);
- label = gen_label_rtx ();
- do_compare_rtx_and_jump (op0, op1, code, unsignedp, operand_mode, NULL_RTX,
- NULL_RTX, label);
-
- emit_move_insn (target, invert ? const1_rtx : const0_rtx);
- emit_label (label);
-
- return target;
+ /* Try a cstore if possible. */
+ return emit_store_flag_force (target, code, op0, op1,
+ operand_mode, unsignedp, 1);
}
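
A minimal standalone check (not part of the patch) of the identity behind the reversed-comparison paths added to emit_store_flag above, for the 1/-1 cases: a store-flag insn for the reversed condition yields 0 when the original comparison is true and its flag value when it is false, and the patch recovers the normalized original result with an addition when STORE_FLAG_VALUE and normalizep are 1 and -1 of opposite sign, or with an XOR against trueval (which equals normalizep when normalizep is nonzero) when they agree. The names fix_up and check are invented for this sketch.

#include <assert.h>

/* R is what the reversed comparison's scc produced; return what
   emit_store_flag is expected to produce for the original comparison,
   using the add/xor selection from the patch (covering the 1/-1 cases).  */
static int
fix_up (int r, int store_flag_value, int normalizep)
{
  if ((store_flag_value == 1 && normalizep == -1)
      || (store_flag_value == -1 && normalizep == 1))
    return r + normalizep;   /* add_optab path */
  return r ^ normalizep;     /* xor_optab path */
}

static void
check (int store_flag_value, int normalizep)
{
  /* Original comparison false: reversed scc is "true".  */
  assert (fix_up (store_flag_value, store_flag_value, normalizep) == 0);
  /* Original comparison true: reversed scc is "false".  */
  assert (fix_up (0, store_flag_value, normalizep) == normalizep);
}

int
main (void)
{
  check (1, 1);
  check (1, -1);
  check (-1, 1);
  check (-1, -1);
  return 0;
}
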