author     Richard Sandiford <richard.sandiford@linaro.org>	2017-10-22 21:39:29 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>	2017-10-22 21:39:29 +0000
commit     bb06a2d85567fc0d5d82d28629ebc54453c35f17
tree       4eaaa8513cdd739b4ec805e56e182a356e6d60b9	/gcc/simplify-rtx.c
parent     1e3734f59eee29ed335da8fc9f40e66903f64b20
Make more use of GET_MODE_UNIT_PRECISION
This patch is like the earlier GET_MODE_UNIT_SIZE one, but for precisions
rather than sizes.  There is one behavioural change in expand_debug_expr:
we shouldn't use lowpart subregs for non-scalar truncations, since that
would just reinterpret some of the scalars and drop the rest.  (This
probably doesn't trigger in practice.)  Using TRUNCATE is fine for
scalars, since simplify_gen_unary knows when a subreg can be used.

2017-10-22  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* cfgexpand.c (expand_debug_expr): Use GET_MODE_UNIT_PRECISION.
	(expand_debug_source_expr): Likewise.
	* combine.c (combine_simplify_rtx): Likewise.
	* cse.c (fold_rtx): Likewise.
	* optabs.c (expand_float): Likewise.
	* simplify-rtx.c (simplify_unary_operation_1): Likewise.
	(simplify_binary_operation_1): Likewise.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r253991
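
The lowpart-subreg versus TRUNCATE point above can be illustrated with a
short sketch.  This is not the actual cfgexpand.c change (that file is
outside this page's diff); the function name and parameters below are
hypothetical, but simplify_gen_unary and the RTL forms mentioned in the
comments are the real things being contrasted.

/* Illustration only: the function name and parameters are hypothetical
   and do not come from cfgexpand.c.  */
static rtx
narrow_for_debug_sketch (machine_mode mode, machine_mode inner_mode, rtx op)
{
  /* A lowpart subreg such as (subreg:V4QI (reg:V4SI x) 0) merely
     reinterprets the low bytes of OP: it keeps one SImode element's
     worth of bits as four QImode lanes and drops the other elements,
     which is not what a vector truncation means.  */

  /* (truncate:V4QI (reg:V4SI x)) narrows every element instead.  For
     scalar modes this is still fine, because simplify_gen_unary knows
     when the truncation can be represented as a lowpart subreg.  */
  return simplify_gen_unary (TRUNCATE, mode, op, inner_mode);
}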
Diffstat (limited to 'gcc/simplify-rtx.c')
-rw-r--r--	gcc/simplify-rtx.c	48
1 file changed, 26 insertions(+), 22 deletions(-)
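
For reference while reading the hunks below: GET_MODE_PRECISION is the
precision of the whole mode, while GET_MODE_UNIT_PRECISION is the
precision of one element of a vector mode (and the same value as
GET_MODE_PRECISION for a scalar mode).  The shift, rotate and
sign/zero-extension rules in simplify-rtx.c operate per element, so they
want the latter.  The assertions below are a minimal sketch of that
relationship, assuming a target that provides V4SImode; they use GCC's
internal machmode.h macros and are not a standalone program.

/* Sketch of the macro relationship, assuming a target with V4SImode
   (a vector of four 32-bit integers).  */
static void
unit_precision_sketch (void)
{
  /* For scalar modes the two queries agree, so the hunks below do not
     change behaviour there.  */
  gcc_checking_assert (GET_MODE_UNIT_PRECISION (SImode)
		       == GET_MODE_PRECISION (SImode));

  /* For vector modes they differ: the whole-vector precision covers all
     four elements, but each unit (element) is only 32 bits wide.  */
  gcc_checking_assert (GET_MODE_UNIT_PRECISION (V4SImode)
		       == GET_MODE_PRECISION (GET_MODE_INNER (V4SImode)));

  /* Per-element rules therefore want the unit precision: a shift count
     for V4SImode is truncated modulo 32, not modulo 128, which is what
     the SHIFT_COUNT_TRUNCATED hunk below now computes.  */
  gcc_checking_assert ((GET_MODE_UNIT_PRECISION (V4SImode) - 1) == 31);
}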
diff --git a/gcc/simplify-rtx.c b/gcc/simplify-rtx.c
index ba6b225b..da0283d 100644
--- a/gcc/simplify-rtx.c
+++ b/gcc/simplify-rtx.c
@@ -1136,7 +1136,7 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == ASHIFTRT
&& CONST_INT_P (XEXP (op, 1))
- && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
+ && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
return simplify_gen_binary (LSHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
@@ -1144,7 +1144,7 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
C is equal to the width of MODE minus 1. */
if (GET_CODE (op) == LSHIFTRT
&& CONST_INT_P (XEXP (op, 1))
- && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
+ && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
return simplify_gen_binary (ASHIFTRT, mode,
XEXP (op, 0), XEXP (op, 1));
@@ -1446,19 +1446,21 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
if (lcode == ASHIFTRT)
/* Number of bits not shifted off the end. */
- bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ bits = (GET_MODE_UNIT_PRECISION (lmode)
+ - INTVAL (XEXP (lhs, 1)));
else /* lcode == SIGN_EXTEND */
/* Size of inner mode. */
- bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+ bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
if (rcode == ASHIFTRT)
- bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ bits += (GET_MODE_UNIT_PRECISION (rmode)
+ - INTVAL (XEXP (rhs, 1)));
else /* rcode == SIGN_EXTEND */
- bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+ bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
/* We can only widen multiplies if the result is mathematiclly
equivalent. I.e. if overflow was impossible. */
- if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
return simplify_gen_binary
(MULT, mode,
simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
@@ -1483,8 +1485,8 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
(sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
{
- gcc_assert (GET_MODE_PRECISION (mode)
- > GET_MODE_PRECISION (GET_MODE (op)));
+ gcc_assert (GET_MODE_UNIT_PRECISION (mode)
+ > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
GET_MODE (XEXP (op, 0)));
}
@@ -1584,19 +1586,21 @@ simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
if (lcode == LSHIFTRT)
/* Number of bits not shifted off the end. */
- bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+ bits = (GET_MODE_UNIT_PRECISION (lmode)
+ - INTVAL (XEXP (lhs, 1)));
else /* lcode == ZERO_EXTEND */
/* Size of inner mode. */
- bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+ bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
if (rcode == LSHIFTRT)
- bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+ bits += (GET_MODE_UNIT_PRECISION (rmode)
+ - INTVAL (XEXP (rhs, 1)));
else /* rcode == ZERO_EXTEND */
- bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+ bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
/* We can only widen multiplies if the result is mathematiclly
equivalent. I.e. if overflow was impossible. */
- if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+ if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
return simplify_gen_binary
(MULT, mode,
simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
@@ -2144,7 +2148,6 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
{
rtx tem, reversed, opleft, opright;
HOST_WIDE_INT val;
- unsigned int width = GET_MODE_PRECISION (mode);
scalar_int_mode int_mode, inner_mode;
/* Even if we can't compute a constant result,
@@ -2702,7 +2705,7 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
&& CONST_INT_P (XEXP (opleft, 1))
&& CONST_INT_P (XEXP (opright, 1))
&& (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
- == GET_MODE_PRECISION (mode)))
+ == GET_MODE_UNIT_PRECISION (mode)))
return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
/* Same, but for ashift that has been "simplified" to a wider mode
@@ -3328,11 +3331,12 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
if (CONST_INT_P (trueop1)
&& IN_RANGE (INTVAL (trueop1),
- GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
- GET_MODE_PRECISION (mode) - 1))
+ GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
+ GET_MODE_UNIT_PRECISION (mode) - 1))
return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
- mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
- - INTVAL (trueop1)));
+ mode, op0,
+ GEN_INT (GET_MODE_UNIT_PRECISION (mode)
+ - INTVAL (trueop1)));
#endif
/* FALLTHRU */
case ASHIFTRT:
@@ -3383,7 +3387,7 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
{
- val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
+ val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
if (val != INTVAL (op1))
return simplify_gen_binary (code, mode, op0, GEN_INT (val));
}
@@ -3408,7 +3412,7 @@ simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
&& is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
&& CONST_INT_P (trueop1)
&& STORE_FLAG_VALUE == 1
- && INTVAL (trueop1) < (HOST_WIDE_INT)width)
+ && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
{
unsigned HOST_WIDE_INT zero_val = 0;