-rw-r--r--  gcc/ChangeLog      |   11
-rw-r--r--  gcc/rtl.h          |    4
-rw-r--r--  gcc/simplify-rtx.c | 2495
3 files changed, 1280 insertions, 1230 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index b5d1314..3b6e377 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,14 @@
+2005-02-28 Paolo Bonzini <bonzini@gnu.org>
+
+ * simplify-rtx.c (simplify_unary_operation_1,
+ simplify_const_unary_operation): New, extracted from...
+ (simplify_unary_operation): ... this one.
+ (simplify_binary_operation_1,
+ simplify_const_binary_operation): New, extracted from...
+ (simplify_binary_operation): ... this one.
+ * rtl.h (simplify_const_unary_operation,
+ simplify_const_binary_operation): Add prototypes.
+
2005-02-28 Julian Brown <julian@codesourcery.com>
* config/elfos.h: Revert my patch from 2005-02-25 since it broke
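
Before the per-file hunks, the shape of the refactoring in one place: each public simplifier becomes a thin wrapper that tries constant folding first and then falls back to the identities that hold for arbitrary operands. A condensed paraphrase of the new simplify_binary_operation (my sketch; the real function also canonicalizes commutative operands before this point, see the hunks below):

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  /* Look through references to the constant pool.  */
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* First attempt to fold a constant result...  */
  rtx tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;

  /* ...then try the simplifications that work for non-constant operands.  */
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}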
diff --git a/gcc/rtl.h b/gcc/rtl.h
index b0d839a5..689e13f 100644
--- a/gcc/rtl.h
+++ b/gcc/rtl.h
@@ -1516,8 +1516,12 @@ extern int split_branch_probability;
extern rtx split_insns (rtx, rtx);
/* In simplify-rtx.c */
+extern rtx simplify_const_unary_operation (enum rtx_code, enum machine_mode,
+ rtx, enum machine_mode);
extern rtx simplify_unary_operation (enum rtx_code, enum machine_mode, rtx,
enum machine_mode);
+extern rtx simplify_const_binary_operation (enum rtx_code, enum machine_mode,
+ rtx, rtx);
extern rtx simplify_binary_operation (enum rtx_code, enum machine_mode, rtx,
rtx);
extern rtx simplify_ternary_operation (enum rtx_code, enum machine_mode,
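
With the prototypes exported, a pass that only wants constant folding can call the const variants directly instead of the general entry points. A hypothetical caller (the helper name is mine, not GCC's):

#include "rtl.h"

/* Fold (neg C) in MODE; returns NULL_RTX unless C is a constant
   the folder understands.  */
static rtx
fold_const_neg (enum machine_mode mode, rtx c)
{
  return simplify_const_unary_operation (NEG, mode, c, mode);
}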
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index a2cd612..c6b0ec8 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -60,6 +60,9 @@ static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
enum machine_mode, rtx, rtx);
+static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
+static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
+ rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
maximally negative number can overflow). */
@@ -355,53 +358,292 @@ rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
rtx op, enum machine_mode op_mode)
{
+ rtx trueop, tem;
+
+ if (GET_CODE (op) == CONST)
+ op = XEXP (op, 0);
+
+ trueop = avoid_constant_pool_reference (op);
+
+ tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
+ if (tem)
+ return tem;
+
+ return simplify_unary_operation_1 (code, mode, op);
+}
+
+/* Perform some simplifications we can do even if the operands
+ aren't constant. */
+static rtx
+simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
+{
+ enum rtx_code reversed;
+ rtx temp;
+
+ switch (code)
+ {
+ case NOT:
+ /* (not (not X)) == X. */
+ if (GET_CODE (op) == NOT)
+ return XEXP (op, 0);
+
+ /* (not (eq X Y)) == (ne X Y), etc. */
+ if (COMPARISON_P (op)
+ && (mode == BImode || STORE_FLAG_VALUE == -1)
+ && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
+ return simplify_gen_relational (reversed, mode, VOIDmode,
+ XEXP (op, 0), XEXP (op, 1));
+
+ /* (not (plus X -1)) can become (neg X). */
+ if (GET_CODE (op) == PLUS
+ && XEXP (op, 1) == constm1_rtx)
+ return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+
+ /* Similarly, (not (neg X)) is (plus X -1). */
+ if (GET_CODE (op) == NEG)
+ return plus_constant (XEXP (op, 0), -1);
+
+ /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
+ if (GET_CODE (op) == XOR
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && (temp = simplify_unary_operation (NOT, mode,
+ XEXP (op, 1), mode)) != 0)
+ return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+
+ /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
+ if (GET_CODE (op) == PLUS
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && mode_signbit_p (mode, XEXP (op, 1))
+ && (temp = simplify_unary_operation (NOT, mode,
+ XEXP (op, 1), mode)) != 0)
+ return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
+
+ /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
+ operands other than 1, but that is not valid. We could do a
+ similar simplification for (not (lshiftrt C X)) where C is
+ just the sign bit, but this doesn't seem common enough to
+ bother with. */
+ if (GET_CODE (op) == ASHIFT
+ && XEXP (op, 0) == const1_rtx)
+ {
+ temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
+ return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
+ }
+
+ /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
+ by reversing the comparison code if valid. */
+ if (STORE_FLAG_VALUE == -1
+ && COMPARISON_P (op)
+ && (reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN)
+ return simplify_gen_relational (reversed, mode, VOIDmode,
+ XEXP (op, 0), XEXP (op, 1));
+
+ /* (not (ashiftrt foo C)) where C is the number of bits in FOO
+ minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
+ so we can perform the above simplification. */
+
+ if (STORE_FLAG_VALUE == -1
+ && GET_CODE (op) == ASHIFTRT
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return simplify_gen_relational (GE, mode, VOIDmode,
+ XEXP (op, 0), const0_rtx);
+
+ break;
+
+ case NEG:
+ /* (neg (neg X)) == X. */
+ if (GET_CODE (op) == NEG)
+ return XEXP (op, 0);
+
+ /* (neg (plus X 1)) can become (not X). */
+ if (GET_CODE (op) == PLUS
+ && XEXP (op, 1) == const1_rtx)
+ return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
+
+ /* Similarly, (neg (not X)) is (plus X 1). */
+ if (GET_CODE (op) == NOT)
+ return plus_constant (XEXP (op, 0), 1);
+
+ /* (neg (minus X Y)) can become (minus Y X). This transformation
+ isn't safe for modes with signed zeros, since if X and Y are
+ both +0, (minus Y X) is the same as (minus X Y). If the
+ rounding mode is towards +infinity (or -infinity) then the two
+ expressions will be rounded differently. */
+ if (GET_CODE (op) == MINUS
+ && !HONOR_SIGNED_ZEROS (mode)
+ && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+ return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
+
+ if (GET_CODE (op) == PLUS
+ && !HONOR_SIGNED_ZEROS (mode)
+ && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+ {
+ /* (neg (plus A C)) is simplified to (minus -C A). */
+ if (GET_CODE (XEXP (op, 1)) == CONST_INT
+ || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
+ {
+ temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
+ if (temp)
+ return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
+ }
+
+ /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
+ temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+ return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
+ }
+
+ /* (neg (mult A B)) becomes (mult (neg A) B).
+ This works even for floating-point values. */
+ if (GET_CODE (op) == MULT
+ && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+ {
+ temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
+ return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
+ }
+
+ /* NEG commutes with ASHIFT since it is multiplication. Only do
+ this if we can then eliminate the NEG (e.g., if the operand
+ is a constant). */
+ if (GET_CODE (op) == ASHIFT)
+ {
+ temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
+ if (temp)
+ return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
+ }
+
+ /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
+ C is equal to the width of MODE minus 1. */
+ if (GET_CODE (op) == ASHIFTRT
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return simplify_gen_binary (LSHIFTRT, mode,
+ XEXP (op, 0), XEXP (op, 1));
+
+ /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
+ C is equal to the width of MODE minus 1. */
+ if (GET_CODE (op) == LSHIFTRT
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
+ return simplify_gen_binary (ASHIFTRT, mode,
+ XEXP (op, 0), XEXP (op, 1));
+
+ break;
+
+ case SIGN_EXTEND:
+ /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
+ becomes just the MINUS if its mode is MODE. This allows
+ folding switch statements on machines using casesi (such as
+ the VAX). */
+ if (GET_CODE (op) == TRUNCATE
+ && GET_MODE (XEXP (op, 0)) == mode
+ && GET_CODE (XEXP (op, 0)) == MINUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
+ return XEXP (op, 0);
+
+ /* Check for a sign extension of a subreg of a promoted
+ variable, where the promotion is sign-extended, and the
+ target mode is the same as the variable's promotion. */
+ if (GET_CODE (op) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (op)
+ && ! SUBREG_PROMOTED_UNSIGNED_P (op)
+ && GET_MODE (XEXP (op, 0)) == mode)
+ return XEXP (op, 0);
+
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+ if (! POINTERS_EXTEND_UNSIGNED
+ && mode == Pmode && GET_MODE (op) == ptr_mode
+ && (CONSTANT_P (op)
+ || (GET_CODE (op) == SUBREG
+ && REG_P (SUBREG_REG (op))
+ && REG_POINTER (SUBREG_REG (op))
+ && GET_MODE (SUBREG_REG (op)) == Pmode)))
+ return convert_memory_address (Pmode, op);
+#endif
+ break;
+
+ case ZERO_EXTEND:
+ /* Check for a zero extension of a subreg of a promoted
+ variable, where the promotion is zero-extended, and the
+ target mode is the same as the variable's promotion. */
+ if (GET_CODE (op) == SUBREG
+ && SUBREG_PROMOTED_VAR_P (op)
+ && SUBREG_PROMOTED_UNSIGNED_P (op)
+ && GET_MODE (XEXP (op, 0)) == mode)
+ return XEXP (op, 0);
+
+#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
+ if (POINTERS_EXTEND_UNSIGNED > 0
+ && mode == Pmode && GET_MODE (op) == ptr_mode
+ && (CONSTANT_P (op)
+ || (GET_CODE (op) == SUBREG
+ && REG_P (SUBREG_REG (op))
+ && REG_POINTER (SUBREG_REG (op))
+ && GET_MODE (SUBREG_REG (op)) == Pmode)))
+ return convert_memory_address (Pmode, op);
+#endif
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* Try to compute the value of a unary operation CODE whose output mode is to
+ be MODE with input operand OP whose mode was originally OP_MODE.
+ Return zero if the value cannot be computed. */
+rtx
+simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
+ rtx op, enum machine_mode op_mode)
+{
unsigned int width = GET_MODE_BITSIZE (mode);
- rtx trueop = avoid_constant_pool_reference (op);
if (code == VEC_DUPLICATE)
{
gcc_assert (VECTOR_MODE_P (mode));
- if (GET_MODE (trueop) != VOIDmode)
+ if (GET_MODE (op) != VOIDmode)
{
- if (!VECTOR_MODE_P (GET_MODE (trueop)))
- gcc_assert (GET_MODE_INNER (mode) == GET_MODE (trueop));
+ if (!VECTOR_MODE_P (GET_MODE (op)))
+ gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
else
gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
- (GET_MODE (trueop)));
+ (GET_MODE (op)));
}
- if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
- || GET_CODE (trueop) == CONST_VECTOR)
+ if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
+ || GET_CODE (op) == CONST_VECTOR)
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
- if (GET_CODE (trueop) != CONST_VECTOR)
+ if (GET_CODE (op) != CONST_VECTOR)
for (i = 0; i < n_elts; i++)
- RTVEC_ELT (v, i) = trueop;
+ RTVEC_ELT (v, i) = op;
else
{
- enum machine_mode inmode = GET_MODE (trueop);
+ enum machine_mode inmode = GET_MODE (op);
int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);
gcc_assert (in_n_elts < n_elts);
gcc_assert ((n_elts % in_n_elts) == 0);
for (i = 0; i < n_elts; i++)
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
}
return gen_rtx_CONST_VECTOR (mode, v);
}
}
- else if (GET_CODE (op) == CONST)
- return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
- if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
+ if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
{
int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
- enum machine_mode opmode = GET_MODE (trueop);
+ enum machine_mode opmode = GET_MODE (op);
int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
rtvec v = rtvec_alloc (n_elts);
@@ -411,7 +653,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
- CONST_VECTOR_ELT (trueop, i),
+ CONST_VECTOR_ELT (op, i),
GET_MODE_INNER (opmode));
if (!x)
return 0;
@@ -424,32 +666,32 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
check the wrong mode (input vs. output) for a conversion operation,
such as FIX. At some point, this should be simplified. */
- if (code == FLOAT && GET_MODE (trueop) == VOIDmode
- && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
+ if (code == FLOAT && GET_MODE (op) == VOIDmode
+ && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
{
HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (GET_CODE (trueop) == CONST_INT)
- lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
+ if (GET_CODE (op) == CONST_INT)
+ lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
else
- lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
+ lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
REAL_VALUE_FROM_INT (d, lv, hv, mode);
d = real_value_truncate (mode, d);
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
- else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
- && (GET_CODE (trueop) == CONST_DOUBLE
- || GET_CODE (trueop) == CONST_INT))
+ else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
+ && (GET_CODE (op) == CONST_DOUBLE
+ || GET_CODE (op) == CONST_INT))
{
HOST_WIDE_INT hv, lv;
REAL_VALUE_TYPE d;
- if (GET_CODE (trueop) == CONST_INT)
- lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
+ if (GET_CODE (op) == CONST_INT)
+ lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
else
- lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);
+ lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);
if (op_mode == VOIDmode)
{
@@ -468,10 +710,10 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
- if (GET_CODE (trueop) == CONST_INT
+ if (GET_CODE (op) == CONST_INT
&& width <= HOST_BITS_PER_WIDE_INT && width > 0)
{
- HOST_WIDE_INT arg0 = INTVAL (trueop);
+ HOST_WIDE_INT arg0 = INTVAL (op);
HOST_WIDE_INT val;
switch (code)
@@ -594,18 +836,18 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
/* We can do some operations on integer CONST_DOUBLEs. Also allow
for a DImode operation on a CONST_INT. */
- else if (GET_MODE (trueop) == VOIDmode
+ else if (GET_MODE (op) == VOIDmode
&& width <= HOST_BITS_PER_WIDE_INT * 2
- && (GET_CODE (trueop) == CONST_DOUBLE
- || GET_CODE (trueop) == CONST_INT))
+ && (GET_CODE (op) == CONST_DOUBLE
+ || GET_CODE (op) == CONST_INT))
{
unsigned HOST_WIDE_INT l1, lv;
HOST_WIDE_INT h1, hv;
- if (GET_CODE (trueop) == CONST_DOUBLE)
- l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
+ if (GET_CODE (op) == CONST_DOUBLE)
+ l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
else
- l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);
+ l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);
switch (code)
{
@@ -719,11 +961,11 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
return immed_double_const (lv, hv, mode);
}
- else if (GET_CODE (trueop) == CONST_DOUBLE
+ else if (GET_CODE (op) == CONST_DOUBLE
&& GET_MODE_CLASS (mode) == MODE_FLOAT)
{
REAL_VALUE_TYPE d, t;
- REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
+ REAL_VALUE_FROM_CONST_DOUBLE (d, op);
switch (code)
{
@@ -753,20 +995,20 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
long tmp[4];
int i;
- real_to_target (tmp, &d, GET_MODE (trueop));
+ real_to_target (tmp, &d, GET_MODE (op));
for (i = 0; i < 4; i++)
tmp[i] = ~tmp[i];
real_from_target (&d, tmp, mode);
+ break;
}
- break;
default:
gcc_unreachable ();
}
return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
}
- else if (GET_CODE (trueop) == CONST_DOUBLE
- && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
+ else if (GET_CODE (op) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
&& GET_MODE_CLASS (mode) == MODE_INT
&& width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
{
@@ -775,9 +1017,11 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
by target backends), for consistency, this routine implements the
same semantics for constant folding as used by the middle-end. */
+ /* This was formerly used only for non-IEEE float.
+ eggert@twinsun.com says it is safe for IEEE also. */
HOST_WIDE_INT xh, xl, th, tl;
REAL_VALUE_TYPE x, t;
- REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
+ REAL_VALUE_FROM_CONST_DOUBLE (x, op);
switch (code)
{
case FIX:
@@ -863,237 +1107,7 @@ simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
return immed_double_const (xl, xh, mode);
}
- /* This was formerly used only for non-IEEE float.
- eggert@twinsun.com says it is safe for IEEE also. */
- else
- {
- enum rtx_code reversed;
- rtx temp;
-
- /* There are some simplifications we can do even if the operands
- aren't constant. */
- switch (code)
- {
- case NOT:
- /* (not (not X)) == X. */
- if (GET_CODE (op) == NOT)
- return XEXP (op, 0);
-
- /* (not (eq X Y)) == (ne X Y), etc. */
- if (COMPARISON_P (op)
- && (mode == BImode || STORE_FLAG_VALUE == -1)
- && ((reversed = reversed_comparison_code (op, NULL_RTX))
- != UNKNOWN))
- return simplify_gen_relational (reversed, mode, VOIDmode,
- XEXP (op, 0), XEXP (op, 1));
-
- /* (not (plus X -1)) can become (neg X). */
- if (GET_CODE (op) == PLUS
- && XEXP (op, 1) == constm1_rtx)
- return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
-
- /* Similarly, (not (neg X)) is (plus X -1). */
- if (GET_CODE (op) == NEG)
- return plus_constant (XEXP (op, 0), -1);
-
- /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
- if (GET_CODE (op) == XOR
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && (temp = simplify_unary_operation (NOT, mode,
- XEXP (op, 1),
- mode)) != 0)
- return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
-
- /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
- if (GET_CODE (op) == PLUS
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && mode_signbit_p (mode, XEXP (op, 1))
- && (temp = simplify_unary_operation (NOT, mode,
- XEXP (op, 1),
- mode)) != 0)
- return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
-
-
-
- /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
- operands other than 1, but that is not valid. We could do a
- similar simplification for (not (lshiftrt C X)) where C is
- just the sign bit, but this doesn't seem common enough to
- bother with. */
- if (GET_CODE (op) == ASHIFT
- && XEXP (op, 0) == const1_rtx)
- {
- temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
- return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
- }
-
- /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
- by reversing the comparison code if valid. */
- if (STORE_FLAG_VALUE == -1
- && COMPARISON_P (op)
- && (reversed = reversed_comparison_code (op, NULL_RTX))
- != UNKNOWN)
- return simplify_gen_relational (reversed, mode, VOIDmode,
- XEXP (op, 0), XEXP (op, 1));
-
- /* (not (ashiftrt foo C)) where C is the number of bits in FOO
- minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
- so we can perform the above simplification. */
-
- if (STORE_FLAG_VALUE == -1
- && GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
- return simplify_gen_relational (GE, mode, VOIDmode,
- XEXP (op, 0), const0_rtx);
-
- break;
-
- case NEG:
- /* (neg (neg X)) == X. */
- if (GET_CODE (op) == NEG)
- return XEXP (op, 0);
-
- /* (neg (plus X 1)) can become (not X). */
- if (GET_CODE (op) == PLUS
- && XEXP (op, 1) == const1_rtx)
- return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
-
- /* Similarly, (neg (not X)) is (plus X 1). */
- if (GET_CODE (op) == NOT)
- return plus_constant (XEXP (op, 0), 1);
-
- /* (neg (minus X Y)) can become (minus Y X). This transformation
- isn't safe for modes with signed zeros, since if X and Y are
- both +0, (minus Y X) is the same as (minus X Y). If the
- rounding mode is towards +infinity (or -infinity) then the two
- expressions will be rounded differently. */
- if (GET_CODE (op) == MINUS
- && !HONOR_SIGNED_ZEROS (mode)
- && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
- return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
- XEXP (op, 0));
-
- if (GET_CODE (op) == PLUS
- && !HONOR_SIGNED_ZEROS (mode)
- && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
- {
- /* (neg (plus A C)) is simplified to (minus -C A). */
- if (GET_CODE (XEXP (op, 1)) == CONST_INT
- || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
- {
- temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
- mode);
- if (temp)
- return simplify_gen_binary (MINUS, mode, temp,
- XEXP (op, 0));
- }
-
- /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
- temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
- return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
- }
-
- /* (neg (mult A B)) becomes (mult (neg A) B).
- This works even for floating-point values. */
- if (GET_CODE (op) == MULT
- && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
- {
- temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
- return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
- }
-
- /* NEG commutes with ASHIFT since it is multiplication. Only do
- this if we can then eliminate the NEG (e.g., if the operand
- is a constant). */
- if (GET_CODE (op) == ASHIFT)
- {
- temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
- mode);
- if (temp)
- return simplify_gen_binary (ASHIFT, mode, temp,
- XEXP (op, 1));
- }
-
- /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
- C is equal to the width of MODE minus 1. */
- if (GET_CODE (op) == ASHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
- return simplify_gen_binary (LSHIFTRT, mode,
- XEXP (op, 0), XEXP (op, 1));
-
- /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
- C is equal to the width of MODE minus 1. */
- if (GET_CODE (op) == LSHIFTRT
- && GET_CODE (XEXP (op, 1)) == CONST_INT
- && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
- return simplify_gen_binary (ASHIFTRT, mode,
- XEXP (op, 0), XEXP (op, 1));
-
- break;
-
- case SIGN_EXTEND:
- /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
- becomes just the MINUS if its mode is MODE. This allows
- folding switch statements on machines using casesi (such as
- the VAX). */
- if (GET_CODE (op) == TRUNCATE
- && GET_MODE (XEXP (op, 0)) == mode
- && GET_CODE (XEXP (op, 0)) == MINUS
- && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
- && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
- return XEXP (op, 0);
-
- /* Check for a sign extension of a subreg of a promoted
- variable, where the promotion is sign-extended, and the
- target mode is the same as the variable's promotion. */
- if (GET_CODE (op) == SUBREG
- && SUBREG_PROMOTED_VAR_P (op)
- && ! SUBREG_PROMOTED_UNSIGNED_P (op)
- && GET_MODE (XEXP (op, 0)) == mode)
- return XEXP (op, 0);
-
-#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
- if (! POINTERS_EXTEND_UNSIGNED
- && mode == Pmode && GET_MODE (op) == ptr_mode
- && (CONSTANT_P (op)
- || (GET_CODE (op) == SUBREG
- && REG_P (SUBREG_REG (op))
- && REG_POINTER (SUBREG_REG (op))
- && GET_MODE (SUBREG_REG (op)) == Pmode)))
- return convert_memory_address (Pmode, op);
-#endif
- break;
-
- case ZERO_EXTEND:
- /* Check for a zero extension of a subreg of a promoted
- variable, where the promotion is zero-extended, and the
- target mode is the same as the variable's promotion. */
- if (GET_CODE (op) == SUBREG
- && SUBREG_PROMOTED_VAR_P (op)
- && SUBREG_PROMOTED_UNSIGNED_P (op)
- && GET_MODE (XEXP (op, 0)) == mode)
- return XEXP (op, 0);
-
-#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
- if (POINTERS_EXTEND_UNSIGNED > 0
- && mode == Pmode && GET_MODE (op) == ptr_mode
- && (CONSTANT_P (op)
- || (GET_CODE (op) == SUBREG
- && REG_P (SUBREG_REG (op))
- && REG_POINTER (SUBREG_REG (op))
- && GET_MODE (SUBREG_REG (op)) == Pmode)))
- return convert_memory_address (Pmode, op);
-#endif
- break;
-
- default:
- break;
- }
-
- return 0;
- }
+ return NULL_RTX;
}
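
As a quick sanity check of the NOT/NEG identities used in simplify_unary_operation_1 above (my own test program, not part of the patch; unsigned arithmetic models the two's-complement wraparound the RTL transformations assume):

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint32_t vals[] = { 0, 1, 5, 0x7fffffffu, 0x80000000u, 0xffffffffu };
  unsigned int i;

  for (i = 0; i < sizeof vals / sizeof vals[0]; i++)
    {
      uint32_t x = vals[i];
      assert (~(x - 1) == -x);    /* (not (plus X -1)) == (neg X) */
      assert (~(-x) == x - 1);    /* (not (neg X)) == (plus X -1) */
      assert (-(x + 1) == ~x);    /* (neg (plus X 1)) == (not X) */
      assert (-(~x) == x + 1);    /* (neg (not X)) == (plus X 1) */
    }
  return 0;
}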
/* Subroutine of simplify_binary_operation to simplify a commutative,
@@ -1154,6 +1168,7 @@ simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
return 0;
}
+
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
and OP1. Return 0 if no simplification is possible.
@@ -1163,9 +1178,6 @@ rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
rtx op0, rtx op1)
{
- HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
- HOST_WIDE_INT val;
- unsigned int width = GET_MODE_BITSIZE (mode);
rtx trueop0, trueop1;
rtx tem;
@@ -1186,15 +1198,832 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
trueop0 = avoid_constant_pool_reference (op0);
trueop1 = avoid_constant_pool_reference (op1);
+ tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
+ if (tem)
+ return tem;
+ return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
+}
+
+static rtx
+simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
+ rtx op0, rtx op1, rtx trueop0, rtx trueop1)
+{
+ rtx tem;
+ HOST_WIDE_INT val;
+ unsigned int width = GET_MODE_BITSIZE (mode);
+
+ /* Even if we can't compute a constant result,
+ there are some cases worth simplifying. */
+
+ switch (code)
+ {
+ case PLUS:
+ /* Maybe simplify x + 0 to x. The two expressions are equivalent
+ when x is NaN, infinite, or finite and nonzero. They aren't
+ when x is -0 and the rounding mode is not towards -infinity,
+ since (-0) + 0 is then 0. */
+ if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
+ return op0;
+
+ /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
+ transformations are safe even for IEEE. */
+ if (GET_CODE (op0) == NEG)
+ return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
+ else if (GET_CODE (op1) == NEG)
+ return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
+
+ /* (~a) + 1 -> -a */
+ if (INTEGRAL_MODE_P (mode)
+ && GET_CODE (op0) == NOT
+ && trueop1 == const1_rtx)
+ return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
+
+ /* Handle both-operands-constant cases. We can only add
+ CONST_INTs to constants since the sum of relocatable symbols
+ can't be handled by most assemblers. Don't add CONST_INT
+ to CONST_INT since overflow won't be computed properly if wider
+ than HOST_BITS_PER_WIDE_INT. */
+
+ if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
+ && GET_CODE (op1) == CONST_INT)
+ return plus_constant (op0, INTVAL (op1));
+ else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
+ && GET_CODE (op0) == CONST_INT)
+ return plus_constant (op1, INTVAL (op0));
+
+ /* See if this is something like X * C - X or vice versa or
+ if the multiplication is written as a shift. If so, we can
+ distribute and make a new multiply, shift, or maybe just
+ have X (if C is 2 in the example above). But don't make
+ something more expensive than we had before. */
+
+ if (! FLOAT_MODE_P (mode))
+ {
+ HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+ rtx lhs = op0, rhs = op1;
+
+ if (GET_CODE (lhs) == NEG)
+ coeff0 = -1, lhs = XEXP (lhs, 0);
+ else if (GET_CODE (lhs) == MULT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+ else if (GET_CODE (lhs) == ASHIFT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && INTVAL (XEXP (lhs, 1)) >= 0
+ && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ lhs = XEXP (lhs, 0);
+ }
+
+ if (GET_CODE (rhs) == NEG)
+ coeff1 = -1, rhs = XEXP (rhs, 0);
+ else if (GET_CODE (rhs) == MULT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ {
+ coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+ }
+ else if (GET_CODE (rhs) == ASHIFT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && INTVAL (XEXP (rhs, 1)) >= 0
+ && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ rhs = XEXP (rhs, 0);
+ }
+
+ if (rtx_equal_p (lhs, rhs))
+ {
+ rtx orig = gen_rtx_PLUS (mode, op0, op1);
+ tem = simplify_gen_binary (MULT, mode, lhs,
+ GEN_INT (coeff0 + coeff1));
+ return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ ? tem : 0;
+ }
+ }
+
+ /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
+ if ((GET_CODE (op1) == CONST_INT
+ || GET_CODE (op1) == CONST_DOUBLE)
+ && GET_CODE (op0) == XOR
+ && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
+ && mode_signbit_p (mode, op1))
+ return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
+ simplify_gen_binary (XOR, mode, op1,
+ XEXP (op0, 1)));
+
+ /* If one of the operands is a PLUS or a MINUS, see if we can
+ simplify this by the associative law.
+ Don't use the associative law for floating point.
+ The inaccuracy makes it nonassociative,
+ and subtle programs can break if operations are associated. */
+
+ if (INTEGRAL_MODE_P (mode)
+ && (plus_minus_operand_p (op0)
+ || plus_minus_operand_p (op1))
+ && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
+ return tem;
+
+ /* Reassociate floating point addition only when the user
+ specifies unsafe math optimizations. */
+ if (FLOAT_MODE_P (mode)
+ && flag_unsafe_math_optimizations)
+ {
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ }
+ break;
+
+ case COMPARE:
+#ifdef HAVE_cc0
+ /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
+ using cc0, in which case we want to leave it as a COMPARE
+ so we can distinguish it from a register-register-copy.
+
+ In IEEE floating point, x-0 is not the same as x. */
+
+ if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
+ || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
+ && trueop1 == CONST0_RTX (mode))
+ return op0;
+#endif
+
+ /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
+ if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
+ || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
+ && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
+ {
+ rtx xop00 = XEXP (op0, 0);
+ rtx xop10 = XEXP (op1, 0);
+
+#ifdef HAVE_cc0
+ if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
+#else
+ if (REG_P (xop00) && REG_P (xop10)
+ && GET_MODE (xop00) == GET_MODE (xop10)
+ && REGNO (xop00) == REGNO (xop10)
+ && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
+ && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
+#endif
+ return xop00;
+ }
+ break;
+
+ case MINUS:
+ /* We can't assume x-x is 0 even with non-IEEE floating point,
+ but since it is zero except in very strange circumstances, we
+ will treat it as zero with -funsafe-math-optimizations. */
+ if (rtx_equal_p (trueop0, trueop1)
+ && ! side_effects_p (op0)
+ && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
+ return CONST0_RTX (mode);
+
+ /* Change subtraction from zero into negation. (0 - x) is the
+ same as -x when x is NaN, infinite, or finite and nonzero.
+ But if the mode has signed zeros, and does not round towards
+ -infinity, then 0 - 0 is 0, not -0. */
+ if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
+ return simplify_gen_unary (NEG, mode, op1, mode);
+
+ /* (-1 - a) is ~a. */
+ if (trueop0 == constm1_rtx)
+ return simplify_gen_unary (NOT, mode, op1, mode);
+
+ /* Subtracting 0 has no effect unless the mode has signed zeros
+ and supports rounding towards -infinity. In such a case,
+ 0 - 0 is -0. */
+ if (!(HONOR_SIGNED_ZEROS (mode)
+ && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
+ && trueop1 == CONST0_RTX (mode))
+ return op0;
+
+ /* See if this is something like X * C - X or vice versa or
+ if the multiplication is written as a shift. If so, we can
+ distribute and make a new multiply, shift, or maybe just
+ have X (if C is 2 in the example above). But don't make
+ something more expensive than we had before. */
+
+ if (! FLOAT_MODE_P (mode))
+ {
+ HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
+ rtx lhs = op0, rhs = op1;
+
+ if (GET_CODE (lhs) == NEG)
+ coeff0 = -1, lhs = XEXP (lhs, 0);
+ else if (GET_CODE (lhs) == MULT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
+ {
+ coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
+ }
+ else if (GET_CODE (lhs) == ASHIFT
+ && GET_CODE (XEXP (lhs, 1)) == CONST_INT
+ && INTVAL (XEXP (lhs, 1)) >= 0
+ && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
+ lhs = XEXP (lhs, 0);
+ }
+
+ if (GET_CODE (rhs) == NEG)
+ coeff1 = -1, rhs = XEXP (rhs, 0);
+ else if (GET_CODE (rhs) == MULT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
+ {
+ coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
+ }
+ else if (GET_CODE (rhs) == ASHIFT
+ && GET_CODE (XEXP (rhs, 1)) == CONST_INT
+ && INTVAL (XEXP (rhs, 1)) >= 0
+ && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
+ {
+ coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
+ rhs = XEXP (rhs, 0);
+ }
+
+ if (rtx_equal_p (lhs, rhs))
+ {
+ rtx orig = gen_rtx_MINUS (mode, op0, op1);
+ tem = simplify_gen_binary (MULT, mode, lhs,
+ GEN_INT (coeff0 - coeff1));
+ return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
+ ? tem : 0;
+ }
+ }
+
+ /* (a - (-b)) -> (a + b). True even for IEEE. */
+ if (GET_CODE (op1) == NEG)
+ return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
+
+ /* (-x - c) may be simplified as (-c - x). */
+ if (GET_CODE (op0) == NEG
+ && (GET_CODE (op1) == CONST_INT
+ || GET_CODE (op1) == CONST_DOUBLE))
+ {
+ tem = simplify_unary_operation (NEG, mode, op1, mode);
+ if (tem)
+ return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
+ }
+
+ /* If one of the operands is a PLUS or a MINUS, see if we can
+ simplify this by the associative law.
+ Don't use the associative law for floating point.
+ The inaccuracy makes it nonassociative,
+ and subtle programs can break if operations are associated. */
+
+ if (INTEGRAL_MODE_P (mode)
+ && (plus_minus_operand_p (op0)
+ || plus_minus_operand_p (op1))
+ && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
+ return tem;
+
+ /* Don't let a relocatable value get a negative coeff. */
+ if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
+ return simplify_gen_binary (PLUS, mode,
+ op0,
+ neg_const_int (mode, op1));
+
+ /* (x - (x & y)) -> (x & ~y) */
+ if (GET_CODE (op1) == AND)
+ {
+ if (rtx_equal_p (op0, XEXP (op1, 0)))
+ {
+ tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
+ GET_MODE (XEXP (op1, 1)));
+ return simplify_gen_binary (AND, mode, op0, tem);
+ }
+ if (rtx_equal_p (op0, XEXP (op1, 1)))
+ {
+ tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
+ GET_MODE (XEXP (op1, 0)));
+ return simplify_gen_binary (AND, mode, op0, tem);
+ }
+ }
+ break;
+
+ case MULT:
+ if (trueop1 == constm1_rtx)
+ return simplify_gen_unary (NEG, mode, op0, mode);
+
+ /* Maybe simplify x * 0 to 0. The reduction is not valid if
+ x is NaN, since x * 0 is then also NaN. Nor is it valid
+ when the mode has signed zeros, since multiplying a negative
+ number by 0 will give -0, not 0. */
+ if (!HONOR_NANS (mode)
+ && !HONOR_SIGNED_ZEROS (mode)
+ && trueop1 == CONST0_RTX (mode)
+ && ! side_effects_p (op0))
+ return op1;
+
+ /* In IEEE floating point, x*1 is not equivalent to x for
+ signalling NaNs. */
+ if (!HONOR_SNANS (mode)
+ && trueop1 == CONST1_RTX (mode))
+ return op0;
+
+ /* Convert multiply by constant power of two into shift unless
+ we are still generating RTL. This test is a kludge. */
+ if (GET_CODE (trueop1) == CONST_INT
+ && (val = exact_log2 (INTVAL (trueop1))) >= 0
+ /* If the mode is larger than the host word size, and the
+ uppermost bit is set, then this isn't a power of two due
+ to implicit sign extension. */
+ && (width <= HOST_BITS_PER_WIDE_INT
+ || val != HOST_BITS_PER_WIDE_INT - 1))
+ return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
+
+ /* x*2 is x+x and x*(-1) is -x */
+ if (GET_CODE (trueop1) == CONST_DOUBLE
+ && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
+ && GET_MODE (op0) == mode)
+ {
+ REAL_VALUE_TYPE d;
+ REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
+
+ if (REAL_VALUES_EQUAL (d, dconst2))
+ return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
+
+ if (REAL_VALUES_EQUAL (d, dconstm1))
+ return simplify_gen_unary (NEG, mode, op0, mode);
+ }
+
+ /* Reassociate multiplication, but for floating point MULTs
+ only when the user specifies unsafe math optimizations. */
+ if (! FLOAT_MODE_P (mode)
+ || flag_unsafe_math_optimizations)
+ {
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ }
+ break;
+
+ case IOR:
+ if (trueop1 == const0_rtx)
+ return op0;
+ if (GET_CODE (trueop1) == CONST_INT
+ && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
+ == GET_MODE_MASK (mode)))
+ return op1;
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ return op0;
+ /* A | (~A) -> -1 */
+ if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
+ || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
+ && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return constm1_rtx;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ break;
+
+ case XOR:
+ if (trueop1 == const0_rtx)
+ return op0;
+ if (GET_CODE (trueop1) == CONST_INT
+ && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
+ == GET_MODE_MASK (mode)))
+ return simplify_gen_unary (NOT, mode, op0, mode);
+ if (trueop0 == trueop1
+ && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return const0_rtx;
+
+ /* Canonicalize XOR of the most significant bit to PLUS. */
+ if ((GET_CODE (op1) == CONST_INT
+ || GET_CODE (op1) == CONST_DOUBLE)
+ && mode_signbit_p (mode, op1))
+ return simplify_gen_binary (PLUS, mode, op0, op1);
+ /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
+ if ((GET_CODE (op1) == CONST_INT
+ || GET_CODE (op1) == CONST_DOUBLE)
+ && GET_CODE (op0) == PLUS
+ && (GET_CODE (XEXP (op0, 1)) == CONST_INT
+ || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
+ && mode_signbit_p (mode, XEXP (op0, 1)))
+ return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
+ simplify_gen_binary (XOR, mode, op1,
+ XEXP (op0, 1)));
+
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ break;
+
+ case AND:
+ if (trueop1 == const0_rtx && ! side_effects_p (op0))
+ return const0_rtx;
+ /* If we are turning off bits already known off in OP0, we need
+ not do an AND. */
+ if (GET_CODE (trueop1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
+ return op0;
+ if (trueop0 == trueop1 && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return op0;
+ /* A & (~A) -> 0 */
+ if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
+ || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
+ && ! side_effects_p (op0)
+ && GET_MODE_CLASS (mode) != MODE_CC)
+ return const0_rtx;
+
+ /* Transform (and (extend X) C) into (zero_extend (and X C)) if
+ there are no nonzero bits of C outside of X's mode. */
+ if ((GET_CODE (op0) == SIGN_EXTEND
+ || GET_CODE (op0) == ZERO_EXTEND)
+ && GET_CODE (trueop1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
+ & INTVAL (trueop1)) == 0)
+ {
+ enum machine_mode imode = GET_MODE (XEXP (op0, 0));
+ tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
+ gen_int_mode (INTVAL (trueop1),
+ imode));
+ return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
+ }
+
+ /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
+ ((A & N) + B) & M -> (A + B) & M
+ Similarly if (N & M) == 0,
+ ((A | N) + B) & M -> (A + B) & M
+ and for - instead of + and/or ^ instead of |. */
+ if (GET_CODE (trueop1) == CONST_INT
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+ && ~INTVAL (trueop1)
+ && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
+ && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
+ {
+ rtx pmop[2];
+ int which;
+
+ pmop[0] = XEXP (op0, 0);
+ pmop[1] = XEXP (op0, 1);
+
+ for (which = 0; which < 2; which++)
+ {
+ tem = pmop[which];
+ switch (GET_CODE (tem))
+ {
+ case AND:
+ if (GET_CODE (XEXP (tem, 1)) == CONST_INT
+ && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
+ == INTVAL (trueop1))
+ pmop[which] = XEXP (tem, 0);
+ break;
+ case IOR:
+ case XOR:
+ if (GET_CODE (XEXP (tem, 1)) == CONST_INT
+ && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
+ pmop[which] = XEXP (tem, 0);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
+ {
+ tem = simplify_gen_binary (GET_CODE (op0), mode,
+ pmop[0], pmop[1]);
+ return simplify_gen_binary (code, mode, tem, op1);
+ }
+ }
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ break;
+
+ case UDIV:
+ /* 0/x is 0 (or x&0 if x has side-effects). */
+ if (trueop0 == const0_rtx)
+ return side_effects_p (op1)
+ ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+ : const0_rtx;
+ /* x/1 is x. */
+ if (trueop1 == const1_rtx)
+ {
+ /* Handle narrowing UDIV. */
+ rtx x = gen_lowpart_common (mode, op0);
+ if (x)
+ return x;
+ if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+ return gen_lowpart_SUBREG (mode, op0);
+ return op0;
+ }
+ /* Convert divide by power of two into shift. */
+ if (GET_CODE (trueop1) == CONST_INT
+ && (val = exact_log2 (INTVAL (trueop1))) > 0)
+ return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
+ break;
+
+ case DIV:
+ /* Handle floating point and integers separately. */
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ /* Maybe change 0.0 / x to 0.0. This transformation isn't
+ safe for modes with NaNs, since 0.0 / 0.0 will then be
+ NaN rather than 0.0. Nor is it safe for modes with signed
+ zeros, since dividing 0 by a negative number gives -0.0 */
+ if (trueop0 == CONST0_RTX (mode)
+ && !HONOR_NANS (mode)
+ && !HONOR_SIGNED_ZEROS (mode)
+ && ! side_effects_p (op1))
+ return op0;
+ /* x/1.0 is x. */
+ if (trueop1 == CONST1_RTX (mode)
+ && !HONOR_SNANS (mode))
+ return op0;
+
+ if (GET_CODE (trueop1) == CONST_DOUBLE
+ && trueop1 != CONST0_RTX (mode))
+ {
+ REAL_VALUE_TYPE d;
+ REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
+
+ /* x/-1.0 is -x. */
+ if (REAL_VALUES_EQUAL (d, dconstm1)
+ && !HONOR_SNANS (mode))
+ return simplify_gen_unary (NEG, mode, op0, mode);
+
+ /* Change FP division by a constant into multiplication.
+ Only do this with -funsafe-math-optimizations. */
+ if (flag_unsafe_math_optimizations
+ && !REAL_VALUES_EQUAL (d, dconst0))
+ {
+ REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
+ tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
+ return simplify_gen_binary (MULT, mode, op0, tem);
+ }
+ }
+ }
+ else
+ {
+ /* 0/x is 0 (or x&0 if x has side-effects). */
+ if (trueop0 == const0_rtx)
+ return side_effects_p (op1)
+ ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+ : const0_rtx;
+ /* x/1 is x. */
+ if (trueop1 == const1_rtx)
+ {
+ /* Handle narrowing DIV. */
+ rtx x = gen_lowpart_common (mode, op0);
+ if (x)
+ return x;
+ if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+ return gen_lowpart_SUBREG (mode, op0);
+ return op0;
+ }
+ /* x/-1 is -x. */
+ if (trueop1 == constm1_rtx)
+ {
+ rtx x = gen_lowpart_common (mode, op0);
+ if (!x)
+ x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
+ ? gen_lowpart_SUBREG (mode, op0) : op0;
+ return simplify_gen_unary (NEG, mode, x, mode);
+ }
+ }
+ break;
+
+ case UMOD:
+ /* 0%x is 0 (or x&0 if x has side-effects). */
+ if (trueop0 == const0_rtx)
+ return side_effects_p (op1)
+ ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+ : const0_rtx;
+ /* x%1 is 0 (or x&0 if x has side-effects). */
+ if (trueop1 == const1_rtx)
+ return side_effects_p (op0)
+ ? simplify_gen_binary (AND, mode, op0, const0_rtx)
+ : const0_rtx;
+ /* Implement modulus by power of two as AND. */
+ if (GET_CODE (trueop1) == CONST_INT
+ && exact_log2 (INTVAL (trueop1)) > 0)
+ return simplify_gen_binary (AND, mode, op0,
+ GEN_INT (INTVAL (op1) - 1));
+ break;
+
+ case MOD:
+ /* 0%x is 0 (or x&0 if x has side-effects). */
+ if (trueop0 == const0_rtx)
+ return side_effects_p (op1)
+ ? simplify_gen_binary (AND, mode, op1, const0_rtx)
+ : const0_rtx;
+ /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
+ if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
+ return side_effects_p (op0)
+ ? simplify_gen_binary (AND, mode, op0, const0_rtx)
+ : const0_rtx;
+ break;
+
+ case ROTATERT:
+ case ROTATE:
+ case ASHIFTRT:
+ /* Rotating ~0 always results in ~0. */
+ if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
+ && ! side_effects_p (op1))
+ return op0;
+
+ /* Fall through. */
+
+ case ASHIFT:
+ case LSHIFTRT:
+ if (trueop1 == const0_rtx)
+ return op0;
+ if (trueop0 == const0_rtx && ! side_effects_p (op1))
+ return op0;
+ break;
+
+ case SMIN:
+ if (width <= HOST_BITS_PER_WIDE_INT
+ && GET_CODE (trueop1) == CONST_INT
+ && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
+ && ! side_effects_p (op0))
+ return op1;
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ return op0;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ break;
+
+ case SMAX:
+ if (width <= HOST_BITS_PER_WIDE_INT
+ && GET_CODE (trueop1) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
+ == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
+ && ! side_effects_p (op0))
+ return op1;
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ return op0;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ break;
+
+ case UMIN:
+ if (trueop1 == const0_rtx && ! side_effects_p (op0))
+ return op1;
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ return op0;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ break;
+
+ case UMAX:
+ if (trueop1 == constm1_rtx && ! side_effects_p (op0))
+ return op1;
+ if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
+ return op0;
+ tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+ break;
+
+ case SS_PLUS:
+ case US_PLUS:
+ case SS_MINUS:
+ case US_MINUS:
+ /* ??? There are simplifications that can be done. */
+ return 0;
+
+ case VEC_SELECT:
+ if (!VECTOR_MODE_P (mode))
+ {
+ gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+ gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
+ gcc_assert (GET_CODE (trueop1) == PARALLEL);
+ gcc_assert (XVECLEN (trueop1, 0) == 1);
+ gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
+
+ if (GET_CODE (trueop0) == CONST_VECTOR)
+ return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
+ (trueop1, 0, 0)));
+ }
+ else
+ {
+ gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (GET_MODE (trueop0)));
+ gcc_assert (GET_CODE (trueop1) == PARALLEL);
+
+ if (GET_CODE (trueop0) == CONST_VECTOR)
+ {
+ int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ rtvec v = rtvec_alloc (n_elts);
+ unsigned int i;
+
+ gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
+ for (i = 0; i < n_elts; i++)
+ {
+ rtx x = XVECEXP (trueop1, 0, i);
+
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
+ INTVAL (x));
+ }
+
+ return gen_rtx_CONST_VECTOR (mode, v);
+ }
+ }
+ return 0;
+ case VEC_CONCAT:
+ {
+ enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
+ ? GET_MODE (trueop0)
+ : GET_MODE_INNER (mode));
+ enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
+ ? GET_MODE (trueop1)
+ : GET_MODE_INNER (mode));
+
+ gcc_assert (VECTOR_MODE_P (mode));
+ gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
+ == GET_MODE_SIZE (mode));
+
+ if (VECTOR_MODE_P (op0_mode))
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (op0_mode));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == op0_mode);
+
+ if (VECTOR_MODE_P (op1_mode))
+ gcc_assert (GET_MODE_INNER (mode)
+ == GET_MODE_INNER (op1_mode));
+ else
+ gcc_assert (GET_MODE_INNER (mode) == op1_mode);
+
+ if ((GET_CODE (trueop0) == CONST_VECTOR
+ || GET_CODE (trueop0) == CONST_INT
+ || GET_CODE (trueop0) == CONST_DOUBLE)
+ && (GET_CODE (trueop1) == CONST_VECTOR
+ || GET_CODE (trueop1) == CONST_INT
+ || GET_CODE (trueop1) == CONST_DOUBLE))
+ {
+ int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
+ unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
+ rtvec v = rtvec_alloc (n_elts);
+ unsigned int i;
+ unsigned in_n_elts = 1;
+
+ if (VECTOR_MODE_P (op0_mode))
+ in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
+ for (i = 0; i < n_elts; i++)
+ {
+ if (i < in_n_elts)
+ {
+ if (!VECTOR_MODE_P (op0_mode))
+ RTVEC_ELT (v, i) = trueop0;
+ else
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
+ }
+ else
+ {
+ if (!VECTOR_MODE_P (op1_mode))
+ RTVEC_ELT (v, i) = trueop1;
+ else
+ RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
+ i - in_n_elts);
+ }
+ }
+
+ return gen_rtx_CONST_VECTOR (mode, v);
+ }
+ }
+ return 0;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return 0;
+}
+
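+/* Try to compute the value of a binary operation CODE whose output mode is
+   MODE, given constant operands OP0 and OP1.  Return zero if the value
+   cannot be computed.  */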
+rtx
+simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
+ rtx op0, rtx op1)
+{
+ HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
+ HOST_WIDE_INT val;
+ unsigned int width = GET_MODE_BITSIZE (mode);
+
if (VECTOR_MODE_P (mode)
&& code != VEC_CONCAT
- && GET_CODE (trueop0) == CONST_VECTOR
- && GET_CODE (trueop1) == CONST_VECTOR)
+ && GET_CODE (op0) == CONST_VECTOR
+ && GET_CODE (op1) == CONST_VECTOR)
{
unsigned n_elts = GET_MODE_NUNITS (mode);
- enum machine_mode op0mode = GET_MODE (trueop0);
+ enum machine_mode op0mode = GET_MODE (op0);
unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
- enum machine_mode op1mode = GET_MODE (trueop1);
+ enum machine_mode op1mode = GET_MODE (op1);
unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
rtvec v = rtvec_alloc (n_elts);
unsigned int i;
@@ -1204,8 +2033,8 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
for (i = 0; i < n_elts; i++)
{
rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
- CONST_VECTOR_ELT (trueop0, i),
- CONST_VECTOR_ELT (trueop1, i));
+ CONST_VECTOR_ELT (op0, i),
+ CONST_VECTOR_ELT (op1, i));
if (!x)
return 0;
RTVEC_ELT (v, i) = x;
@@ -1216,7 +2045,7 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
if (VECTOR_MODE_P (mode)
&& code == VEC_CONCAT
- && CONSTANT_P (trueop0) && CONSTANT_P (trueop1))
+ && CONSTANT_P (op0) && CONSTANT_P (op1))
{
unsigned n_elts = GET_MODE_NUNITS (mode);
rtvec v = rtvec_alloc (n_elts);
@@ -1224,34 +2053,34 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
gcc_assert (n_elts >= 2);
if (n_elts == 2)
{
- gcc_assert (GET_CODE (trueop0) != CONST_VECTOR);
- gcc_assert (GET_CODE (trueop1) != CONST_VECTOR);
+ gcc_assert (GET_CODE (op0) != CONST_VECTOR);
+ gcc_assert (GET_CODE (op1) != CONST_VECTOR);
- RTVEC_ELT (v, 0) = trueop0;
- RTVEC_ELT (v, 1) = trueop1;
+ RTVEC_ELT (v, 0) = op0;
+ RTVEC_ELT (v, 1) = op1;
}
else
{
- unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (trueop0));
- unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (trueop1));
+ unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
+ unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
unsigned i;
- gcc_assert (GET_CODE (trueop0) == CONST_VECTOR);
- gcc_assert (GET_CODE (trueop1) == CONST_VECTOR);
+ gcc_assert (GET_CODE (op0) == CONST_VECTOR);
+ gcc_assert (GET_CODE (op1) == CONST_VECTOR);
gcc_assert (op0_n_elts + op1_n_elts == n_elts);
for (i = 0; i < op0_n_elts; ++i)
- RTVEC_ELT (v, i) = XVECEXP (trueop0, 0, i);
+ RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
for (i = 0; i < op1_n_elts; ++i)
- RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (trueop1, 0, i);
+ RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
}
return gen_rtx_CONST_VECTOR (mode, v);
}
if (GET_MODE_CLASS (mode) == MODE_FLOAT
- && GET_CODE (trueop0) == CONST_DOUBLE
- && GET_CODE (trueop1) == CONST_DOUBLE
+ && GET_CODE (op0) == CONST_DOUBLE
+ && GET_CODE (op1) == CONST_DOUBLE
&& mode == GET_MODE (op0) && mode == GET_MODE (op1))
{
if (code == AND
@@ -1292,8 +2121,8 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
REAL_VALUE_TYPE f0, f1, value, result;
bool inexact;
- REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
- REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
+ REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
+ REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
real_convert (&f0, mode, &f0);
real_convert (&f1, mode, &f1);
@@ -1363,23 +2192,21 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
/* We can fold some multi-word operations. */
if (GET_MODE_CLASS (mode) == MODE_INT
&& width == HOST_BITS_PER_WIDE_INT * 2
- && (GET_CODE (trueop0) == CONST_DOUBLE
- || GET_CODE (trueop0) == CONST_INT)
- && (GET_CODE (trueop1) == CONST_DOUBLE
- || GET_CODE (trueop1) == CONST_INT))
+ && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
+ && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
{
unsigned HOST_WIDE_INT l1, l2, lv, lt;
HOST_WIDE_INT h1, h2, hv, ht;
- if (GET_CODE (trueop0) == CONST_DOUBLE)
- l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
+ if (GET_CODE (op0) == CONST_DOUBLE)
+ l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
else
- l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);
+ l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
- if (GET_CODE (trueop1) == CONST_DOUBLE)
- l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
+ if (GET_CODE (op1) == CONST_DOUBLE)
+ l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
else
- l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);
+ l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
switch (code)
{
@@ -1501,969 +2328,177 @@ simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
return immed_double_const (lv, hv, mode);
}
- if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
- || width > HOST_BITS_PER_WIDE_INT || width == 0)
+ if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
+ && width <= HOST_BITS_PER_WIDE_INT && width != 0)
{
- /* Even if we can't compute a constant result,
- there are some cases worth simplifying. */
+ /* Get the integer argument values in two forms:
+ zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
- switch (code)
- {
- case PLUS:
- /* Maybe simplify x + 0 to x. The two expressions are equivalent
- when x is NaN, infinite, or finite and nonzero. They aren't
- when x is -0 and the rounding mode is not towards -infinity,
- since (-0) + 0 is then 0. */
- if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
- return op0;
+ arg0 = INTVAL (op0);
+ arg1 = INTVAL (op1);
- /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
- transformations are safe even for IEEE. */
- if (GET_CODE (op0) == NEG)
- return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
- else if (GET_CODE (op1) == NEG)
- return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
-
- /* (~a) + 1 -> -a */
- if (INTEGRAL_MODE_P (mode)
- && GET_CODE (op0) == NOT
- && trueop1 == const1_rtx)
- return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
-
- /* Handle both-operands-constant cases. We can only add
- CONST_INTs to constants since the sum of relocatable symbols
- can't be handled by most assemblers. Don't add CONST_INT
- to CONST_INT since overflow won't be computed properly if wider
- than HOST_BITS_PER_WIDE_INT. */
-
- if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
- && GET_CODE (op1) == CONST_INT)
- return plus_constant (op0, INTVAL (op1));
- else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
- && GET_CODE (op0) == CONST_INT)
- return plus_constant (op1, INTVAL (op0));
-
- /* See if this is something like X * C - X or vice versa or
- if the multiplication is written as a shift. If so, we can
- distribute and make a new multiply, shift, or maybe just
- have X (if C is 2 in the example above). But don't make
- something more expensive than we had before. */
-
- if (! FLOAT_MODE_P (mode))
- {
- HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
- rtx lhs = op0, rhs = op1;
+ if (width < HOST_BITS_PER_WIDE_INT)
+ {
+ arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
+ arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
- if (GET_CODE (lhs) == NEG)
- coeff0 = -1, lhs = XEXP (lhs, 0);
- else if (GET_CODE (lhs) == MULT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
- {
- coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
- }
- else if (GET_CODE (lhs) == ASHIFT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT
- && INTVAL (XEXP (lhs, 1)) >= 0
- && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
- {
- coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- lhs = XEXP (lhs, 0);
- }
-
- if (GET_CODE (rhs) == NEG)
- coeff1 = -1, rhs = XEXP (rhs, 0);
- else if (GET_CODE (rhs) == MULT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
- {
- coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
- }
- else if (GET_CODE (rhs) == ASHIFT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT
- && INTVAL (XEXP (rhs, 1)) >= 0
- && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
- {
- coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
- rhs = XEXP (rhs, 0);
- }
-
- if (rtx_equal_p (lhs, rhs))
- {
- rtx orig = gen_rtx_PLUS (mode, op0, op1);
- tem = simplify_gen_binary (MULT, mode, lhs,
- GEN_INT (coeff0 + coeff1));
- return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
- ? tem : 0;
- }
- }
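
Aside: a quick standalone check of the coefficient-collection rule above, in which a NEG contributes coefficient -1 and an ASHIFT by N contributes 2^N (illustrative only, not part of the patch):

  #include <stdio.h>

  int
  main (void)
  {
    long long x = 7;
    long long sum = (x << 2) + -x;  /* coeff0 = 4, coeff1 = -1 */
    long long mul = x * (4 + -1);   /* the collected multiply: x * 3 */
    printf ("%d\n", sum == mul);    /* prints 1 */
    return 0;
  }
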
-
- /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
- if ((GET_CODE (op1) == CONST_INT
- || GET_CODE (op1) == CONST_DOUBLE)
- && GET_CODE (op0) == XOR
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
- || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
- && mode_signbit_p (mode, op1))
- return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
- simplify_gen_binary (XOR, mode, op1,
- XEXP (op0, 1)));
-
- /* If one of the operands is a PLUS or a MINUS, see if we can
- simplify this by the associative law.
- Don't use the associative law for floating point.
- The inaccuracy makes it nonassociative,
- and subtle programs can break if operations are associated. */
-
- if (INTEGRAL_MODE_P (mode)
- && (plus_minus_operand_p (op0)
- || plus_minus_operand_p (op1))
- && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
- return tem;
+ arg0s = arg0;
+ if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ arg0s |= ((HOST_WIDE_INT) (-1) << width);
- /* Reassociate floating point addition only when the user
- specifies unsafe math optimizations. */
- if (FLOAT_MODE_P (mode)
- && flag_unsafe_math_optimizations)
- {
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
- }
- break;
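
Aside: the sign-bit rule above (and the matching XOR canonicalization later in this function) rests on the fact that XOR-ing the sign bit and adding it agree modulo the mode, because the carry out of the top bit is discarded. A standalone check for an 8-bit mode:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned x;
    for (x = 0; x < 256; x++)
      if (((x ^ 0x80) & 0xff) != ((x + 0x80) & 0xff))
        return 1;
    puts ("x ^ 0x80 == x + 0x80 (mod 256) for every 8-bit x");
    return 0;
  }
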
-
- case COMPARE:
-#ifdef HAVE_cc0
- /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
- using cc0, in which case we want to leave it as a COMPARE
- so we can distinguish it from a register-register-copy.
-
- In IEEE floating point, x-0 is not the same as x. */
-
- if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
- || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
- && trueop1 == CONST0_RTX (mode))
- return op0;
-#endif
-
- /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
- if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
- || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
- && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
- {
- rtx xop00 = XEXP (op0, 0);
- rtx xop10 = XEXP (op1, 0);
-
-#ifdef HAVE_cc0
- if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
-#else
- if (REG_P (xop00) && REG_P (xop10)
- && GET_MODE (xop00) == GET_MODE (xop10)
- && REGNO (xop00) == REGNO (xop10)
- && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
- && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
-#endif
- return xop00;
- }
+ arg1s = arg1;
+ if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
+ arg1s |= ((HOST_WIDE_INT) (-1) << width);
+ }
+ else
+ {
+ arg0s = arg0;
+ arg1s = arg1;
+ }
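
Aside: a sketch of the two argument forms for width = 8, assuming a 64-bit HOST_WIDE_INT. The same bit pattern 0xff reads as 255 zero-extended and as -1 sign-extended:

  #include <stdio.h>

  int
  main (void)
  {
    int width = 8;
    long long arg0 = 0x1ff & (((long long) 1 << width) - 1);  /* 0xff */
    long long arg0s = arg0;
    if (arg0s & ((long long) 1 << (width - 1)))
      arg0s |= (long long) -1 << width;   /* mirrors the code above */
    printf ("%lld %lld\n", arg0, arg0s);  /* 255 -1 */
    return 0;
  }
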
+
+ /* Compute the value of the arithmetic. */
+
+ switch (code)
+ {
+ case PLUS:
+ val = arg0s + arg1s;
break;
-
+
case MINUS:
- /* We can't assume x-x is 0 even with non-IEEE floating point,
- but since it is zero except in very strange circumstances, we
- will treat it as zero with -funsafe-math-optimizations. */
- if (rtx_equal_p (trueop0, trueop1)
- && ! side_effects_p (op0)
- && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
- return CONST0_RTX (mode);
-
- /* Change subtraction from zero into negation. (0 - x) is the
- same as -x when x is NaN, infinite, or finite and nonzero.
- But if the mode has signed zeros, and does not round towards
- -infinity, then 0 - 0 is 0, not -0. */
- if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
- return simplify_gen_unary (NEG, mode, op1, mode);
-
- /* (-1 - a) is ~a. */
- if (trueop0 == constm1_rtx)
- return simplify_gen_unary (NOT, mode, op1, mode);
-
- /* Subtracting 0 has no effect unless the mode has signed zeros
- and supports rounding towards -infinity. In such a case,
- 0 - 0 is -0. */
- if (!(HONOR_SIGNED_ZEROS (mode)
- && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
- && trueop1 == CONST0_RTX (mode))
- return op0;
-
- /* See if this is something like X * C - X or vice versa or
- if the multiplication is written as a shift. If so, we can
- distribute and make a new multiply, shift, or maybe just
- have X (if C is 2 in the example above). But don't make
- something more expensive than we had before. */
-
- if (! FLOAT_MODE_P (mode))
- {
- HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
- rtx lhs = op0, rhs = op1;
-
- if (GET_CODE (lhs) == NEG)
- coeff0 = -1, lhs = XEXP (lhs, 0);
- else if (GET_CODE (lhs) == MULT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
- {
- coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
- }
- else if (GET_CODE (lhs) == ASHIFT
- && GET_CODE (XEXP (lhs, 1)) == CONST_INT
- && INTVAL (XEXP (lhs, 1)) >= 0
- && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
- {
- coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
- lhs = XEXP (lhs, 0);
- }
-
- if (GET_CODE (rhs) == NEG)
- coeff1 = -1, rhs = XEXP (rhs, 0);
- else if (GET_CODE (rhs) == MULT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
- {
- coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
- }
- else if (GET_CODE (rhs) == ASHIFT
- && GET_CODE (XEXP (rhs, 1)) == CONST_INT
- && INTVAL (XEXP (rhs, 1)) >= 0
- && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
- {
- coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
- rhs = XEXP (rhs, 0);
- }
-
- if (rtx_equal_p (lhs, rhs))
- {
- rtx orig = gen_rtx_MINUS (mode, op0, op1);
- tem = simplify_gen_binary (MULT, mode, lhs,
- GEN_INT (coeff0 - coeff1));
- return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
- ? tem : 0;
- }
- }
-
- /* (a - (-b)) -> (a + b). True even for IEEE. */
- if (GET_CODE (op1) == NEG)
- return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
-
- /* (-x - c) may be simplified as (-c - x). */
- if (GET_CODE (op0) == NEG
- && (GET_CODE (op1) == CONST_INT
- || GET_CODE (op1) == CONST_DOUBLE))
- {
- tem = simplify_unary_operation (NEG, mode, op1, mode);
- if (tem)
- return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
- }
-
- /* If one of the operands is a PLUS or a MINUS, see if we can
- simplify this by the associative law.
- Don't use the associative law for floating point.
- The inaccuracy makes it nonassociative,
- and subtle programs can break if operations are associated. */
-
- if (INTEGRAL_MODE_P (mode)
- && (plus_minus_operand_p (op0)
- || plus_minus_operand_p (op1))
- && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
- return tem;
-
- /* Don't let a relocatable value get a negative coeff. */
- if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
- return simplify_gen_binary (PLUS, mode,
- op0,
- neg_const_int (mode, op1));
-
- /* (x - (x & y)) -> (x & ~y) */
- if (GET_CODE (op1) == AND)
- {
- if (rtx_equal_p (op0, XEXP (op1, 0)))
- {
- tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
- GET_MODE (XEXP (op1, 1)));
- return simplify_gen_binary (AND, mode, op0, tem);
- }
- if (rtx_equal_p (op0, XEXP (op1, 1)))
- {
- tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
- GET_MODE (XEXP (op1, 0)));
- return simplify_gen_binary (AND, mode, op0, tem);
- }
- }
+ val = arg0s - arg1s;
break;
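
Aside: one of the rules removed above, (x - (x & y)) -> (x & ~y), holds because x & y contains only bits of x, so the subtraction never borrows. A standalone check:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned x = 0xf0f0, y = 0xff00;
    printf ("%d\n", (x - (x & y)) == (x & ~y));  /* prints 1 */
    return 0;
  }
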
-
+
case MULT:
- if (trueop1 == constm1_rtx)
- return simplify_gen_unary (NEG, mode, op0, mode);
-
- /* Maybe simplify x * 0 to 0. The reduction is not valid if
- x is NaN, since x * 0 is then also NaN. Nor is it valid
- when the mode has signed zeros, since multiplying a negative
- number by 0 will give -0, not 0. */
- if (!HONOR_NANS (mode)
- && !HONOR_SIGNED_ZEROS (mode)
- && trueop1 == CONST0_RTX (mode)
- && ! side_effects_p (op0))
- return op1;
-
- /* In IEEE floating point, x*1 is not equivalent to x for
- signalling NaNs. */
- if (!HONOR_SNANS (mode)
- && trueop1 == CONST1_RTX (mode))
- return op0;
-
- /* Convert multiply by constant power of two into shift unless
- we are still generating RTL. This test is a kludge. */
- if (GET_CODE (trueop1) == CONST_INT
- && (val = exact_log2 (INTVAL (trueop1))) >= 0
- /* If the mode is larger than the host word size, and the
- uppermost bit is set, then this isn't a power of two due
- to implicit sign extension. */
- && (width <= HOST_BITS_PER_WIDE_INT
- || val != HOST_BITS_PER_WIDE_INT - 1))
- return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
-
- /* x*2 is x+x and x*(-1) is -x */
- if (GET_CODE (trueop1) == CONST_DOUBLE
- && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
- && GET_MODE (op0) == mode)
- {
- REAL_VALUE_TYPE d;
- REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
-
- if (REAL_VALUES_EQUAL (d, dconst2))
- return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
-
- if (REAL_VALUES_EQUAL (d, dconstm1))
- return simplify_gen_unary (NEG, mode, op0, mode);
- }
-
- /* Reassociate multiplication, but for floating point MULTs
- only when the user specifies unsafe math optimizations. */
- if (! FLOAT_MODE_P (mode)
- || flag_unsafe_math_optimizations)
- {
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
- }
- break;
-
- case IOR:
- if (trueop1 == const0_rtx)
- return op0;
- if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
- return op1;
- if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
- return op0;
- /* A | (~A) -> -1 */
- if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
- || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
- && ! side_effects_p (op0)
- && GET_MODE_CLASS (mode) != MODE_CC)
- return constm1_rtx;
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
+ val = arg0s * arg1s;
break;
-
- case XOR:
- if (trueop1 == const0_rtx)
- return op0;
- if (GET_CODE (trueop1) == CONST_INT
- && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
- == GET_MODE_MASK (mode)))
- return simplify_gen_unary (NOT, mode, op0, mode);
- if (trueop0 == trueop1
- && ! side_effects_p (op0)
- && GET_MODE_CLASS (mode) != MODE_CC)
- return const0_rtx;
-
- /* Canonicalize XOR of the most significant bit to PLUS. */
- if ((GET_CODE (op1) == CONST_INT
- || GET_CODE (op1) == CONST_DOUBLE)
- && mode_signbit_p (mode, op1))
- return simplify_gen_binary (PLUS, mode, op0, op1);
- /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
- if ((GET_CODE (op1) == CONST_INT
- || GET_CODE (op1) == CONST_DOUBLE)
- && GET_CODE (op0) == PLUS
- && (GET_CODE (XEXP (op0, 1)) == CONST_INT
- || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
- && mode_signbit_p (mode, XEXP (op0, 1)))
- return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
- simplify_gen_binary (XOR, mode, op1,
- XEXP (op0, 1)));
-
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
+
+ case DIV:
+ if (arg1s == 0
+ || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ && arg1s == -1))
+ return 0;
+ val = arg0s / arg1s;
break;
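
Aside: the guard above refuses to fold the two cases where host arithmetic is undefined, namely division by zero and the most negative value divided by -1 (whose quotient does not fit). A sketch of the same check with a 64-bit host word:

  #include <limits.h>
  #include <stdio.h>

  /* Return 0 (do not fold) exactly when a / b would be undefined.  */
  static int
  safe_fold_div (long long a, long long b, long long *q)
  {
    if (b == 0 || (a == LLONG_MIN && b == -1))
      return 0;
    *q = a / b;
    return 1;
  }

  int
  main (void)
  {
    long long q;
    printf ("%d\n", safe_fold_div (LLONG_MIN, -1, &q));  /* prints 0 */
    return 0;
  }
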
-
- case AND:
- if (trueop1 == const0_rtx && ! side_effects_p (op0))
- return const0_rtx;
- /* If we are turning off bits already known off in OP0, we need
- not do an AND. */
- if (GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
- return op0;
- if (trueop0 == trueop1 && ! side_effects_p (op0)
- && GET_MODE_CLASS (mode) != MODE_CC)
- return op0;
- /* A & (~A) -> 0 */
- if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
- || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
- && ! side_effects_p (op0)
- && GET_MODE_CLASS (mode) != MODE_CC)
- return const0_rtx;
-
- /* Transform (and (extend X) C) into (zero_extend (and X C)) if
- there are no nonzero bits of C outside of X's mode. */
- if ((GET_CODE (op0) == SIGN_EXTEND
- || GET_CODE (op0) == ZERO_EXTEND)
- && GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
- & INTVAL (trueop1)) == 0)
- {
- enum machine_mode imode = GET_MODE (XEXP (op0, 0));
- tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
- gen_int_mode (INTVAL (trueop1),
- imode));
- return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
- }
-
- /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
- ((A & N) + B) & M -> (A + B) & M
- Similarly if (N & M) == 0,
- ((A | N) + B) & M -> (A + B) & M
- and for - instead of + and/or ^ instead of |. */
- if (GET_CODE (trueop1) == CONST_INT
- && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
- && ~INTVAL (trueop1)
- && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
- && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
- {
- rtx pmop[2];
- int which;
-
- pmop[0] = XEXP (op0, 0);
- pmop[1] = XEXP (op0, 1);
-
- for (which = 0; which < 2; which++)
- {
- tem = pmop[which];
- switch (GET_CODE (tem))
- {
- case AND:
- if (GET_CODE (XEXP (tem, 1)) == CONST_INT
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
- == INTVAL (trueop1))
- pmop[which] = XEXP (tem, 0);
- break;
- case IOR:
- case XOR:
- if (GET_CODE (XEXP (tem, 1)) == CONST_INT
- && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
- pmop[which] = XEXP (tem, 0);
- break;
- default:
- break;
- }
- }
-
- if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
- {
- tem = simplify_gen_binary (GET_CODE (op0), mode,
- pmop[0], pmop[1]);
- return simplify_gen_binary (code, mode, tem, op1);
- }
- }
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
+
+ case MOD:
+ if (arg1s == 0
+ || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ && arg1s == -1))
+ return 0;
+ val = arg0s % arg1s;
break;
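
Aside: a standalone check of the masked-sum rule removed in the AND case above, ((a & n) + b) & m -> (a + b) & m when (n & m) == m. Since m is a low-bit mask and addition carries only propagate upward, bits of a outside n cannot affect the masked result:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned m = 0xff;             /* (1 << 8) - 1 */
    unsigned n = 0x1ff, a, b = 0x1234;
    for (a = 0; a < 0x1000; a++)
      if ((((a & n) + b) & m) != ((a + b) & m))
        return 1;
    puts ("((a & n) + b) & m == (a + b) & m when (n & m) == m");
    return 0;
  }
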
-
+
case UDIV:
- /* 0/x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == const0_rtx)
- return side_effects_p (op1)
- ? simplify_gen_binary (AND, mode, op1, const0_rtx)
- : const0_rtx;
- /* x/1 is x. */
- if (trueop1 == const1_rtx)
- {
- /* Handle narrowing UDIV. */
- rtx x = gen_lowpart_common (mode, op0);
- if (x)
- return x;
- if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
- return gen_lowpart_SUBREG (mode, op0);
- return op0;
- }
- /* Convert divide by power of two into shift. */
- if (GET_CODE (trueop1) == CONST_INT
- && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
- return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
- break;
-
- case DIV:
- /* Handle floating point and integers separately. */
- if (GET_MODE_CLASS (mode) == MODE_FLOAT)
- {
- /* Maybe change 0.0 / x to 0.0. This transformation isn't
- safe for modes with NaNs, since 0.0 / 0.0 will then be
- NaN rather than 0.0. Nor is it safe for modes with signed
- zeros, since dividing 0 by a negative number gives -0.0 */
- if (trueop0 == CONST0_RTX (mode)
- && !HONOR_NANS (mode)
- && !HONOR_SIGNED_ZEROS (mode)
- && ! side_effects_p (op1))
- return op0;
- /* x/1.0 is x. */
- if (trueop1 == CONST1_RTX (mode)
- && !HONOR_SNANS (mode))
- return op0;
-
- if (GET_CODE (trueop1) == CONST_DOUBLE
- && trueop1 != CONST0_RTX (mode))
- {
- REAL_VALUE_TYPE d;
- REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
-
- /* x/-1.0 is -x. */
- if (REAL_VALUES_EQUAL (d, dconstm1)
- && !HONOR_SNANS (mode))
- return simplify_gen_unary (NEG, mode, op0, mode);
-
- /* Change FP division by a constant into multiplication.
- Only do this with -funsafe-math-optimizations. */
- if (flag_unsafe_math_optimizations
- && !REAL_VALUES_EQUAL (d, dconst0))
- {
- REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
- tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
- return simplify_gen_binary (MULT, mode, op0, tem);
- }
- }
- }
- else
- {
- /* 0/x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == const0_rtx)
- return side_effects_p (op1)
- ? simplify_gen_binary (AND, mode, op1, const0_rtx)
- : const0_rtx;
- /* x/1 is x. */
- if (trueop1 == const1_rtx)
- {
- /* Handle narrowing DIV. */
- rtx x = gen_lowpart_common (mode, op0);
- if (x)
- return x;
- if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
- return gen_lowpart_SUBREG (mode, op0);
- return op0;
- }
- /* x/-1 is -x. */
- if (trueop1 == constm1_rtx)
- {
- rtx x = gen_lowpart_common (mode, op0);
- if (!x)
- x = (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
- ? gen_lowpart_SUBREG (mode, op0) : op0;
- return simplify_gen_unary (NEG, mode, x, mode);
- }
- }
+ if (arg1 == 0
+ || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ && arg1s == -1))
+ return 0;
+ val = (unsigned HOST_WIDE_INT) arg0 / arg1;
break;
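
Aside: UDIV and UMOD deliberately use the zero-extended forms. For width = 8 the bits 0xff mean 255, so 0xff udiv 2 folds to 127, where a signed fold of the same bits (-1 / 2) would give 0:

  #include <stdio.h>

  int
  main (void)
  {
    unsigned long long arg0 = 0xff;  /* zero-extended view of the bits */
    long long arg0s = -1;            /* sign-extended view of the same bits */
    printf ("%llu %lld\n", arg0 / 2, arg0s / 2);  /* 127 0 */
    return 0;
  }
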
-
+
case UMOD:
- /* 0%x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == const0_rtx)
- return side_effects_p (op1)
- ? simplify_gen_binary (AND, mode, op1, const0_rtx)
- : const0_rtx;
- /* x%1 is 0 (or x&0 if x has side-effects). */
- if (trueop1 == const1_rtx)
- return side_effects_p (op0)
- ? simplify_gen_binary (AND, mode, op0, const0_rtx)
- : const0_rtx;
- /* Implement modulus by power of two as AND. */
- if (GET_CODE (trueop1) == CONST_INT
- && exact_log2 (INTVAL (trueop1)) > 0)
- return simplify_gen_binary (AND, mode, op0,
- GEN_INT (INTVAL (op1) - 1));
+ if (arg1 == 0
+ || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
+ && arg1s == -1))
+ return 0;
+ val = (unsigned HOST_WIDE_INT) arg0 % arg1;
break;
-
- case MOD:
- /* 0%x is 0 (or x&0 if x has side-effects). */
- if (trueop0 == const0_rtx)
- return side_effects_p (op1)
- ? simplify_gen_binary (AND, mode, op1, const0_rtx)
- : const0_rtx;
- /* x%1 and x%-1 are 0 (or x&0 if x has side-effects). */
- if (trueop1 == const1_rtx || trueop1 == constm1_rtx)
- return side_effects_p (op0)
- ? simplify_gen_binary (AND, mode, op0, const0_rtx)
- : const0_rtx;
+
+ case AND:
+ val = arg0 & arg1;
break;
-
+
+ case IOR:
+ val = arg0 | arg1;
+ break;
+
+ case XOR:
+ val = arg0 ^ arg1;
+ break;
+
+ case LSHIFTRT:
+ case ASHIFT:
+ case ASHIFTRT:
+ /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
+ the value is in range. We can't return any old value for
+ out-of-range arguments because either the middle-end (via
+ shift_truncation_mask) or the back-end might be relying on
+ target-specific knowledge. Nor can we rely on
+ shift_truncation_mask, since the shift might not be part of an
+ ashlM3, lshrM3 or ashrM3 instruction. */
+ if (SHIFT_COUNT_TRUNCATED)
+ arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
+ else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
+ return 0;
+
+ val = (code == ASHIFT
+ ? ((unsigned HOST_WIDE_INT) arg0) << arg1
+ : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
+
+ /* Sign-extend the result for arithmetic right shifts. */
+ if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
+ val |= ((HOST_WIDE_INT) -1) << (width - arg1);
+ break;
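
Aside: the shift is always performed on the zero-extended value; for ASHIFTRT the sign bits are OR-ed back in afterwards. Worked through for width = 8, arg0 = 0xf0 (that is, -16 signed), arg1 = 2:

  #include <stdio.h>

  int
  main (void)
  {
    int width = 8, arg1 = 2;
    long long arg0 = 0xf0;           /* zero-extended */
    long long arg0s = arg0 - 0x100;  /* sign-extended: -16 */
    long long val = (unsigned long long) arg0 >> arg1;  /* 0x3c */
    if (arg0s < 0 && arg1 > 0)
      val |= (long long) -1 << (width - arg1);  /* restore the sign bits */
    printf ("%lld\n", val);  /* -4, i.e. -16 >> 2 arithmetically */
    return 0;
  }
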
+
case ROTATERT:
+ if (arg1 < 0)
+ return 0;
+
+ arg1 %= width;
+ val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
+ | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
+ break;
+
case ROTATE:
- case ASHIFTRT:
- /* Rotating ~0 always results in ~0. */
- if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
- && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
- && ! side_effects_p (op1))
- return op0;
-
- /* Fall through.... */
-
- case ASHIFT:
- case LSHIFTRT:
- if (trueop1 == const0_rtx)
- return op0;
- if (trueop0 == const0_rtx && ! side_effects_p (op1))
- return op0;
+ if (arg1 < 0)
+ return 0;
+
+ arg1 %= width;
+ val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
+ | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
break;
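
Aside: both rotate arms build the result from two shifts of the zero-extended value, and the trunc_int_for_mode call at the end of the function masks the result back to the mode. A sketch for width = 8, rotating 0x96 left by 3, with an explicit mask standing in for that truncation:

  #include <stdio.h>

  int
  main (void)
  {
    int width = 8, arg1 = 3 % width;
    unsigned long long arg0 = 0x96;
    unsigned long long mask = ((unsigned long long) 1 << width) - 1;
    unsigned long long val
      = ((arg0 << arg1) | (arg0 >> (width - arg1))) & mask;
    printf ("0x%llx\n", val);  /* 0xb4: 1001 0110 rotated left 3 */
    return 0;
  }
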
-
+
+ case COMPARE:
+ /* Do nothing here. */
+ return 0;
+
case SMIN:
- if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT
- && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
- && ! side_effects_p (op0))
- return op1;
- if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
- return op0;
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
+ val = arg0s <= arg1s ? arg0s : arg1s;
break;
-
- case SMAX:
- if (width <= HOST_BITS_PER_WIDE_INT
- && GET_CODE (trueop1) == CONST_INT
- && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
- == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
- && ! side_effects_p (op0))
- return op1;
- if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
- return op0;
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
- break;
-
+
case UMIN:
- if (trueop1 == const0_rtx && ! side_effects_p (op0))
- return op1;
- if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
- return op0;
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
+ val = ((unsigned HOST_WIDE_INT) arg0
+ <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
-
+
+ case SMAX:
+ val = arg0s > arg1s ? arg0s : arg1s;
+ break;
+
case UMAX:
- if (trueop1 == constm1_rtx && ! side_effects_p (op0))
- return op1;
- if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
- return op0;
- tem = simplify_associative_operation (code, mode, op0, op1);
- if (tem)
- return tem;
+ val = ((unsigned HOST_WIDE_INT) arg0
+ > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
break;
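
Aside: the four min/max codes differ only in which view of the bits they compare. For width = 8 the pattern 0xff is -1 under SMIN but 255 under UMIN, so the two fold differently against 1:

  #include <stdio.h>

  int
  main (void)
  {
    long long a_s = -1, b_s = 1;             /* sign-extended operands */
    unsigned long long a_u = 0xff, b_u = 1;  /* zero-extended operands */
    printf ("smin=%lld umin=%llu\n",
            a_s <= b_s ? a_s : b_s,   /* -1 */
            a_u <= b_u ? a_u : b_u);  /* 1 */
    return 0;
  }
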
-
+
case SS_PLUS:
case US_PLUS:
case SS_MINUS:
case US_MINUS:
/* ??? There are simplifications that can be done. */
return 0;
-
- case VEC_SELECT:
- if (!VECTOR_MODE_P (mode))
- {
- gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
- gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
- gcc_assert (GET_CODE (trueop1) == PARALLEL);
- gcc_assert (XVECLEN (trueop1, 0) == 1);
- gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
-
- if (GET_CODE (trueop0) == CONST_VECTOR)
- return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
- (trueop1, 0, 0)));
- }
- else
- {
- gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
- gcc_assert (GET_MODE_INNER (mode)
- == GET_MODE_INNER (GET_MODE (trueop0)));
- gcc_assert (GET_CODE (trueop1) == PARALLEL);
-
- if (GET_CODE (trueop0) == CONST_VECTOR)
- {
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
- unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
- rtvec v = rtvec_alloc (n_elts);
- unsigned int i;
-
- gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
- for (i = 0; i < n_elts; i++)
- {
- rtx x = XVECEXP (trueop1, 0, i);
-
- gcc_assert (GET_CODE (x) == CONST_INT);
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
- INTVAL (x));
- }
-
- return gen_rtx_CONST_VECTOR (mode, v);
- }
- }
- return 0;
- case VEC_CONCAT:
- {
- enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
- ? GET_MODE (trueop0)
- : GET_MODE_INNER (mode));
- enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
- ? GET_MODE (trueop1)
- : GET_MODE_INNER (mode));
-
- gcc_assert (VECTOR_MODE_P (mode));
- gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
- == GET_MODE_SIZE (mode));
-
- if (VECTOR_MODE_P (op0_mode))
- gcc_assert (GET_MODE_INNER (mode)
- == GET_MODE_INNER (op0_mode));
- else
- gcc_assert (GET_MODE_INNER (mode) == op0_mode);
-
- if (VECTOR_MODE_P (op1_mode))
- gcc_assert (GET_MODE_INNER (mode)
- == GET_MODE_INNER (op1_mode));
- else
- gcc_assert (GET_MODE_INNER (mode) == op1_mode);
-
- if ((GET_CODE (trueop0) == CONST_VECTOR
- || GET_CODE (trueop0) == CONST_INT
- || GET_CODE (trueop0) == CONST_DOUBLE)
- && (GET_CODE (trueop1) == CONST_VECTOR
- || GET_CODE (trueop1) == CONST_INT
- || GET_CODE (trueop1) == CONST_DOUBLE))
- {
- int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
- unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
- rtvec v = rtvec_alloc (n_elts);
- unsigned int i;
- unsigned in_n_elts = 1;
-
- if (VECTOR_MODE_P (op0_mode))
- in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
- for (i = 0; i < n_elts; i++)
- {
- if (i < in_n_elts)
- {
- if (!VECTOR_MODE_P (op0_mode))
- RTVEC_ELT (v, i) = trueop0;
- else
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
- }
- else
- {
- if (!VECTOR_MODE_P (op1_mode))
- RTVEC_ELT (v, i) = trueop1;
- else
- RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
- i - in_n_elts);
- }
- }
-
- return gen_rtx_CONST_VECTOR (mode, v);
- }
- }
- return 0;
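
Aside: the constant VEC_CONCAT fold removed above fills the result with op0's elements followed by op1's, a scalar operand counting as a single element. A standalone sketch with int arrays as hypothetical stand-ins for CONST_VECTORs:

  #include <stdio.h>

  int
  main (void)
  {
    int op0[] = { 1, 2 }, op1[] = { 3, 4 }, v[4];
    unsigned i, in_n_elts = 2, n_elts = 4;

    for (i = 0; i < n_elts; i++)
      v[i] = i < in_n_elts ? op0[i] : op1[i - in_n_elts];

    for (i = 0; i < n_elts; i++)
      printf ("%d ", v[i]);  /* 1 2 3 4 */
    printf ("\n");
    return 0;
  }
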
-
+
default:
gcc_unreachable ();
}
- return 0;
- }
-
- /* Get the integer argument values in two forms:
- zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S. */
-
- arg0 = INTVAL (trueop0);
- arg1 = INTVAL (trueop1);
-
- if (width < HOST_BITS_PER_WIDE_INT)
- {
- arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
- arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
-
- arg0s = arg0;
- if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg0s |= ((HOST_WIDE_INT) (-1) << width);
-
- arg1s = arg1;
- if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
- arg1s |= ((HOST_WIDE_INT) (-1) << width);
- }
- else
- {
- arg0s = arg0;
- arg1s = arg1;
+ val = trunc_int_for_mode (val, mode);
+ return GEN_INT (val);
}
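
Aside: every folded value passes through trunc_int_for_mode so the resulting CONST_INT stays in canonical sign-extended form. A sketch of that canonicalization for an 8-bit mode; trunc_for_width is a hypothetical helper, not the GCC function:

  #include <stdio.h>

  /* Keep the low WIDTH bits and sign-extend, as trunc_int_for_mode
     does for a WIDTH-bit mode on a 64-bit host word.  */
  static long long
  trunc_for_width (long long val, int width)
  {
    val &= ((long long) 1 << width) - 1;
    if (val & ((long long) 1 << (width - 1)))
      val |= (long long) -1 << width;
    return val;
  }

  int
  main (void)
  {
    printf ("%lld\n", trunc_for_width (200, 8));  /* -56: 200 wraps mod 256 */
    return 0;
  }
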
- /* Compute the value of the arithmetic. */
-
- switch (code)
- {
- case PLUS:
- val = arg0s + arg1s;
- break;
-
- case MINUS:
- val = arg0s - arg1s;
- break;
-
- case MULT:
- val = arg0s * arg1s;
- break;
-
- case DIV:
- if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
- && arg1s == -1))
- return 0;
- val = arg0s / arg1s;
- break;
-
- case MOD:
- if (arg1s == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
- && arg1s == -1))
- return 0;
- val = arg0s % arg1s;
- break;
-
- case UDIV:
- if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
- && arg1s == -1))
- return 0;
- val = (unsigned HOST_WIDE_INT) arg0 / arg1;
- break;
-
- case UMOD:
- if (arg1 == 0
- || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
- && arg1s == -1))
- return 0;
- val = (unsigned HOST_WIDE_INT) arg0 % arg1;
- break;
-
- case AND:
- val = arg0 & arg1;
- break;
-
- case IOR:
- val = arg0 | arg1;
- break;
-
- case XOR:
- val = arg0 ^ arg1;
- break;
-
- case LSHIFTRT:
- case ASHIFT:
- case ASHIFTRT:
- /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure the
- value is in range. We can't return any old value for out-of-range
- arguments because either the middle-end (via shift_truncation_mask)
- or the back-end might be relying on target-specific knowledge.
- Nor can we rely on shift_truncation_mask, since the shift might
- not be part of an ashlM3, lshrM3 or ashrM3 instruction. */
- if (SHIFT_COUNT_TRUNCATED)
- arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
- else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
- return 0;
-
- val = (code == ASHIFT
- ? ((unsigned HOST_WIDE_INT) arg0) << arg1
- : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
-
- /* Sign-extend the result for arithmetic right shifts. */
- if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
- val |= ((HOST_WIDE_INT) -1) << (width - arg1);
- break;
-
- case ROTATERT:
- if (arg1 < 0)
- return 0;
-
- arg1 %= width;
- val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
- | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
- break;
-
- case ROTATE:
- if (arg1 < 0)
- return 0;
-
- arg1 %= width;
- val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
- | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
- break;
-
- case COMPARE:
- /* Do nothing here. */
- return 0;
-
- case SMIN:
- val = arg0s <= arg1s ? arg0s : arg1s;
- break;
-
- case UMIN:
- val = ((unsigned HOST_WIDE_INT) arg0
- <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
- break;
-
- case SMAX:
- val = arg0s > arg1s ? arg0s : arg1s;
- break;
-
- case UMAX:
- val = ((unsigned HOST_WIDE_INT) arg0
- > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
- break;
-
- case SS_PLUS:
- case US_PLUS:
- case SS_MINUS:
- case US_MINUS:
- /* ??? There are simplifications that can be done. */
- return 0;
-
- default:
- gcc_unreachable ();
- }
+ return NULL_RTX;
+}
- val = trunc_int_for_mode (val, mode);
- return GEN_INT (val);
-}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
PLUS or MINUS.