path: root/gcc/cse.c
author     Richard Sandiford <richard.sandiford@linaro.org>  2017-10-22 21:39:29 +0000
committer  Richard Sandiford <rsandifo@gcc.gnu.org>  2017-10-22 21:39:29 +0000
commit     bb06a2d85567fc0d5d82d28629ebc54453c35f17 (patch)
tree       4eaaa8513cdd739b4ec805e56e182a356e6d60b9 /gcc/cse.c
parent     1e3734f59eee29ed335da8fc9f40e66903f64b20 (diff)
Make more use of GET_MODE_UNIT_PRECISION
This patch is like the earlier GET_MODE_UNIT_SIZE one, but for
precisions rather than sizes.  There is one behavioural change in
expand_debug_expr: we shouldn't use lowpart subregs for non-scalar
truncations, since that would just reinterpret some of the scalars
and drop the rest.  (This probably doesn't trigger in practice.)
Using TRUNCATE is fine for scalars, since simplify_gen_unary knows
when a subreg can be used.

2017-10-22  Richard Sandiford  <richard.sandiford@linaro.org>
            Alan Hayward  <alan.hayward@arm.com>
            David Sherwood  <david.sherwood@arm.com>

gcc/
        * cfgexpand.c (expand_debug_expr): Use GET_MODE_UNIT_PRECISION.
        (expand_debug_source_expr): Likewise.
        * combine.c (combine_simplify_rtx): Likewise.
        * cse.c (fold_rtx): Likewise.
        * optabs.c (expand_float): Likewise.
        * simplify-rtx.c (simplify_unary_operation_1): Likewise.
        (simplify_binary_operation_1): Likewise.

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r253991
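To make the expand_debug_expr point concrete: the following standalone
sketch (not GCC code; it assumes a little-endian host and uses plain C
arrays in place of RTL vector modes) contrasts an element-wise
truncation of a V4SI-style value to V4HI with a lowpart
reinterpretation of the same bytes.

    /* Standalone sketch, not GCC code.  Assumes a little-endian host.  */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      uint32_t v4si[4] = { 1, 2, 3, 4 };
      uint16_t trunc[4], lowpart[4];

      /* Element-wise truncation: what a vector TRUNCATE means.  */
      for (int i = 0; i < 4; i++)
        trunc[i] = (uint16_t) v4si[i];

      /* Lowpart subreg: reinterpret the low 64 bits of the 128-bit
         value, i.e. the first two 32-bit scalars, dropping the rest.  */
      memcpy (lowpart, v4si, sizeof lowpart);

      for (int i = 0; i < 4; i++)
        printf ("trunc[%d]=%u lowpart[%d]=%u\n", i, trunc[i], i, lowpart[i]);
      return 0;
    }

On a little-endian host this prints trunc = {1, 2, 3, 4} but
lowpart = {1, 0, 2, 0}: the lowpart reinterprets the first two scalars
and drops the other two, which is why a vector truncation cannot be
expressed as a lowpart subreg.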
Diffstat (limited to 'gcc/cse.c')
-rw-r--r--  gcc/cse.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/gcc/cse.c b/gcc/cse.c
index 25653ac..65cc9ae 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -3607,7 +3607,7 @@ fold_rtx (rtx x, rtx_insn *insn)
enum rtx_code associate_code;
if (is_shift
- && (INTVAL (const_arg1) >= GET_MODE_PRECISION (mode)
+ && (INTVAL (const_arg1) >= GET_MODE_UNIT_PRECISION (mode)
|| INTVAL (const_arg1) < 0))
{
if (SHIFT_COUNT_TRUNCATED)
@@ -3656,7 +3656,7 @@ fold_rtx (rtx x, rtx_insn *insn)
break;
if (is_shift
- && (INTVAL (inner_const) >= GET_MODE_PRECISION (mode)
+ && (INTVAL (inner_const) >= GET_MODE_UNIT_PRECISION (mode)
|| INTVAL (inner_const) < 0))
{
if (SHIFT_COUNT_TRUNCATED)
@@ -3687,7 +3687,7 @@ fold_rtx (rtx x, rtx_insn *insn)
if (is_shift
&& CONST_INT_P (new_const)
- && INTVAL (new_const) >= GET_MODE_PRECISION (mode))
+ && INTVAL (new_const) >= GET_MODE_UNIT_PRECISION (mode))
{
/* As an exception, we can turn an ASHIFTRT of this
form into a shift of the number of bits - 1. */
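For reference, GET_MODE_PRECISION of a vector mode such as V4SImode is
the precision of the whole vector (128 bits), whereas
GET_MODE_UNIT_PRECISION is the precision of a single element (32 bits),
so the latter is the correct bound for a per-element shift count.  A
minimal standalone sketch of the check above (the precision constants
are hard-coded here for illustration; GCC derives them from the mode):

    /* Standalone sketch, not GCC code: the precisions of a V4SI-style
       mode are hard-coded for illustration.  */
    #include <stdio.h>

    #define V4SI_PRECISION       128  /* precision of the whole vector */
    #define V4SI_UNIT_PRECISION   32  /* precision of one SI element   */

    /* Mirrors the patched test in fold_rtx: a per-element shift count
       is out of range if it is negative or >= the element precision.  */
    static int
    shift_count_out_of_range (int count)
    {
      return count >= V4SI_UNIT_PRECISION || count < 0;
    }

    int
    main (void)
    {
      /* 40 is below the whole-vector precision, so a test against
         GET_MODE_PRECISION would wrongly accept it; shifting a 32-bit
         element by 40 bits is invalid.  */
      printf ("count 40: %s\n",
              shift_count_out_of_range (40) ? "out of range" : "ok");
      printf ("count  7: %s\n",
              shift_count_out_of_range (7) ? "out of range" : "ok");
      return 0;
    }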