about summary refs log tree commit diff
path: root/gcc
diff options
context:
space:
mode:
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog             20
-rw-r--r--  gcc/convert.c              3
-rw-r--r--  gcc/expr.c                 7
-rw-r--r--  gcc/fold-const.c           3
-rw-r--r--  gcc/match.pd              27
-rw-r--r--  gcc/tree-ssa-forwprop.c   14
-rw-r--r--  gcc/tree-ssa-math-opts.c   3
-rw-r--r--  gcc/tree-tailcall.c        3
-rw-r--r--  gcc/tree-vect-loop.c       3
-rw-r--r--  gcc/tree-vect-patterns.c   7
-rw-r--r--  gcc/tree-vect-stmts.c     18
-rw-r--r--  gcc/tree-vrp.c             2
-rw-r--r--  gcc/tree.h                 9
13 files changed, 62 insertions(+), 57 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 824d954..e7d4e16 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,23 @@
+2017-08-21 Richard Sandiford <richard.sandiford@linaro.org>
+
+ * tree.h (type_has_mode_precision_p): New function.
+ * convert.c (convert_to_integer_1): Use it.
+ * expr.c (expand_expr_real_2): Likewise.
+ (expand_expr_real_1): Likewise.
+ * fold-const.c (fold_single_bit_test_into_sign_test): Likewise.
+ * match.pd: Likewise.
+ * tree-ssa-forwprop.c (simplify_rotate): Likewise.
+ * tree-ssa-math-opts.c (convert_mult_to_fma): Likewise.
+ * tree-tailcall.c (process_assignment): Likewise.
+ * tree-vect-loop.c (vectorizable_reduction): Likewise.
+ * tree-vect-patterns.c (vect_recog_vector_vector_shift_pattern)
+ (vect_recog_mult_pattern, vect_recog_divmod_pattern): Likewise.
+ * tree-vect-stmts.c (vectorizable_conversion): Likewise.
+ (vectorizable_assignment): Likewise.
+ (vectorizable_shift): Likewise.
+ (vectorizable_operation): Likewise.
+ * tree-vrp.c (register_edge_assert_for_2): Likewise.
+
2017-08-21 Wilco Dijkstra <wdijkstr@arm.com>
* match.pd: Add pow (C, x) simplification.
diff --git a/gcc/convert.c b/gcc/convert.c
index b1a53af..22152ca 100644
--- a/gcc/convert.c
+++ b/gcc/convert.c
@@ -711,8 +711,7 @@ convert_to_integer_1 (tree type, tree expr, bool dofold)
the signed-to-unsigned case the high-order bits have to
be cleared. */
if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
- && (TYPE_PRECISION (TREE_TYPE (expr))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
+ && !type_has_mode_precision_p (TREE_TYPE (expr)))
code = CONVERT_EXPR;
else
code = NOP_EXPR;
diff --git a/gcc/expr.c b/gcc/expr.c
index b194866..7f34f5d 100644
--- a/gcc/expr.c
+++ b/gcc/expr.c
@@ -8244,7 +8244,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
result to be reduced to the precision of the bit-field type,
which is narrower than that of the type's mode. */
reduce_bit_field = (INTEGRAL_TYPE_P (type)
- && GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
+ && !type_has_mode_precision_p (type));
if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
target = 0;
@@ -9097,8 +9097,7 @@ expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
case LROTATE_EXPR:
case RROTATE_EXPR:
gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
- || (GET_MODE_PRECISION (TYPE_MODE (type))
- == TYPE_PRECISION (type)));
+ || type_has_mode_precision_p (type));
/* fall through */
case LSHIFT_EXPR:
@@ -9671,7 +9670,7 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
which is narrower than that of the type's mode. */
reduce_bit_field = (!ignore
&& INTEGRAL_TYPE_P (type)
- && GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
+ && !type_has_mode_precision_p (type));
/* If we are going to ignore this result, we need only do something
if there is a side-effect somewhere in the expression. If there
diff --git a/gcc/fold-const.c b/gcc/fold-const.c
index fef9b1a..de60f68 100644
--- a/gcc/fold-const.c
+++ b/gcc/fold-const.c
@@ -6638,8 +6638,7 @@ fold_single_bit_test_into_sign_test (location_t loc,
if (arg00 != NULL_TREE
/* This is only a win if casting to a signed type is cheap,
i.e. when arg00's type is not a partial mode. */
- && TYPE_PRECISION (TREE_TYPE (arg00))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (arg00))))
+ && type_has_mode_precision_p (TREE_TYPE (arg00)))
{
tree stype = signed_type_for (TREE_TYPE (arg00));
return fold_build2_loc (loc, code == EQ_EXPR ? GE_EXPR : LT_EXPR,
diff --git a/gcc/match.pd b/gcc/match.pd
index a5552c5..69dd8193 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -992,7 +992,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
|| GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
/* Or if the precision of TO is not the same as the precision
of its mode. */
- || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
+ || !type_has_mode_precision_p (type)))
(convert (bitop @0 (convert @1))))))
(for bitop (bit_and bit_ior)
@@ -1920,8 +1920,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
if (shift == LSHIFT_EXPR)
zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
else if (shift == RSHIFT_EXPR
- && (TYPE_PRECISION (shift_type)
- == GET_MODE_PRECISION (TYPE_MODE (shift_type))))
+ && type_has_mode_precision_p (shift_type))
{
prec = TYPE_PRECISION (TREE_TYPE (@3));
tree arg00 = @0;
@@ -1931,8 +1930,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& TYPE_UNSIGNED (TREE_TYPE (@0)))
{
tree inner_type = TREE_TYPE (@0);
- if ((TYPE_PRECISION (inner_type)
- == GET_MODE_PRECISION (TYPE_MODE (inner_type)))
+ if (type_has_mode_precision_p (inner_type)
&& TYPE_PRECISION (inner_type) < prec)
{
prec = TYPE_PRECISION (inner_type);
@@ -3226,8 +3224,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(simplify
(cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && (TYPE_PRECISION (TREE_TYPE (@0))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
+ && type_has_mode_precision_p (TREE_TYPE (@0))
&& element_precision (@2) >= element_precision (@0)
&& wi::only_sign_bit_p (@1, element_precision (@0)))
(with { tree stype = signed_type_for (TREE_TYPE (@0)); }
@@ -4039,11 +4036,9 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* The precision of the type of each operand must match the
precision of the mode of each operand, similarly for the
result. */
- && (TYPE_PRECISION (TREE_TYPE (@0))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
- && (TYPE_PRECISION (TREE_TYPE (@1))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
- && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
+ && type_has_mode_precision_p (TREE_TYPE (@0))
+ && type_has_mode_precision_p (TREE_TYPE (@1))
+ && type_has_mode_precision_p (type)
/* The inner conversion must be a widening conversion. */
&& TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
&& types_match (@0, type)
@@ -4073,11 +4068,9 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* The precision of the type of each operand must match the
precision of the mode of each operand, similarly for the
result. */
- && (TYPE_PRECISION (TREE_TYPE (@0))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
- && (TYPE_PRECISION (TREE_TYPE (@1))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
- && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
+ && type_has_mode_precision_p (TREE_TYPE (@0))
+ && type_has_mode_precision_p (TREE_TYPE (@1))
+ && type_has_mode_precision_p (type)
/* The inner conversion must be a widening conversion. */
&& TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
&& types_match (@0, @1)
diff --git a/gcc/tree-ssa-forwprop.c b/gcc/tree-ssa-forwprop.c
index 5719b99..5296206 100644
--- a/gcc/tree-ssa-forwprop.c
+++ b/gcc/tree-ssa-forwprop.c
@@ -1529,7 +1529,7 @@ simplify_rotate (gimple_stmt_iterator *gsi)
/* Only create rotates in complete modes. Other cases are not
expanded properly. */
if (!INTEGRAL_TYPE_P (rtype)
- || TYPE_PRECISION (rtype) != GET_MODE_PRECISION (TYPE_MODE (rtype)))
+ || !type_has_mode_precision_p (rtype))
return false;
for (i = 0; i < 2; i++)
@@ -1609,8 +1609,7 @@ simplify_rotate (gimple_stmt_iterator *gsi)
&& INTEGRAL_TYPE_P (TREE_TYPE (cdef_arg1[i]))
&& TYPE_PRECISION (TREE_TYPE (cdef_arg1[i]))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (cdef_arg1[i]))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (cdef_arg1[i]))))
+ && type_has_mode_precision_p (TREE_TYPE (cdef_arg1[i])))
{
def_arg2_alt[i] = cdef_arg1[i];
defcodefor_name (def_arg2_alt[i], &cdef_code[i],
@@ -1639,8 +1638,7 @@ simplify_rotate (gimple_stmt_iterator *gsi)
&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
&& TYPE_PRECISION (TREE_TYPE (tem))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem)))
+ && type_has_mode_precision_p (TREE_TYPE (tem))
&& (tem == def_arg2[1 - i]
|| tem == def_arg2_alt[1 - i]))
{
@@ -1667,8 +1665,7 @@ simplify_rotate (gimple_stmt_iterator *gsi)
&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
&& TYPE_PRECISION (TREE_TYPE (tem))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem))))
+ && type_has_mode_precision_p (TREE_TYPE (tem)))
defcodefor_name (tem, &code, &tem, NULL);
if (code == NEGATE_EXPR)
@@ -1683,8 +1680,7 @@ simplify_rotate (gimple_stmt_iterator *gsi)
&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
&& TYPE_PRECISION (TREE_TYPE (tem))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem)))
+ && type_has_mode_precision_p (TREE_TYPE (tem))
&& (tem == def_arg2[1 - i]
|| tem == def_arg2_alt[1 - i]))
{
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index 87940b6..0d75751 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -3564,8 +3564,7 @@ convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2)
/* We don't want to do bitfield reduction ops. */
if (INTEGRAL_TYPE_P (type)
- && (TYPE_PRECISION (type)
- != GET_MODE_PRECISION (TYPE_MODE (type))))
+ && !type_has_mode_precision_p (type))
return false;
/* If the target doesn't support it, don't generate it. We assume that
diff --git a/gcc/tree-tailcall.c b/gcc/tree-tailcall.c
index e0497e5..c4b8cee 100644
--- a/gcc/tree-tailcall.c
+++ b/gcc/tree-tailcall.c
@@ -289,8 +289,7 @@ process_assignment (gassign *stmt,
type is smaller than mode's precision,
reduce_to_bit_field_precision would generate additional code. */
if (INTEGRAL_TYPE_P (TREE_TYPE (dest))
- && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest)))
- > TYPE_PRECISION (TREE_TYPE (dest))))
+ && !type_has_mode_precision_p (TREE_TYPE (dest)))
return FAIL;
}
diff --git a/gcc/tree-vect-loop.c b/gcc/tree-vect-loop.c
index 906323b..59e41f6 100644
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -5848,8 +5848,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
return false;
/* Do not try to vectorize bit-precision reductions. */
- if ((TYPE_PRECISION (scalar_type)
- != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
+ if (!type_has_mode_precision_p (scalar_type))
return false;
/* All uses but the last are expected to be defined in the loop.
diff --git a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
index 17d1083..877711a 100644
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -2067,8 +2067,7 @@ vect_recog_vector_vector_shift_pattern (vec<gimple *> *stmts,
if (TREE_CODE (oprnd0) != SSA_NAME
|| TREE_CODE (oprnd1) != SSA_NAME
|| TYPE_MODE (TREE_TYPE (oprnd0)) == TYPE_MODE (TREE_TYPE (oprnd1))
- || TYPE_PRECISION (TREE_TYPE (oprnd1))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (oprnd1)))
+ || !type_has_mode_precision_p (TREE_TYPE (oprnd1))
|| TYPE_PRECISION (TREE_TYPE (lhs))
!= TYPE_PRECISION (TREE_TYPE (oprnd0)))
return NULL;
@@ -2470,7 +2469,7 @@ vect_recog_mult_pattern (vec<gimple *> *stmts,
if (TREE_CODE (oprnd0) != SSA_NAME
|| TREE_CODE (oprnd1) != INTEGER_CST
|| !INTEGRAL_TYPE_P (itype)
- || TYPE_PRECISION (itype) != GET_MODE_PRECISION (TYPE_MODE (itype)))
+ || !type_has_mode_precision_p (itype))
return NULL;
vectype = get_vectype_for_scalar_type (itype);
@@ -2585,7 +2584,7 @@ vect_recog_divmod_pattern (vec<gimple *> *stmts,
if (TREE_CODE (oprnd0) != SSA_NAME
|| TREE_CODE (oprnd1) != INTEGER_CST
|| TREE_CODE (itype) != INTEGER_TYPE
- || TYPE_PRECISION (itype) != GET_MODE_PRECISION (TYPE_MODE (itype)))
+ || !type_has_mode_precision_p (itype))
return NULL;
vectype = get_vectype_for_scalar_type (itype);
diff --git a/gcc/tree-vect-stmts.c b/gcc/tree-vect-stmts.c
index ee32c56..0629c12 100644
--- a/gcc/tree-vect-stmts.c
+++ b/gcc/tree-vect-stmts.c
@@ -4098,11 +4098,9 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
&& ((INTEGRAL_TYPE_P (lhs_type)
- && (TYPE_PRECISION (lhs_type)
- != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
+ && !type_has_mode_precision_p (lhs_type))
|| (INTEGRAL_TYPE_P (rhs_type)
- && (TYPE_PRECISION (rhs_type)
- != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
+ && !type_has_mode_precision_p (rhs_type))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -4696,10 +4694,8 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
if ((CONVERT_EXPR_CODE_P (code)
|| code == VIEW_CONVERT_EXPR)
&& INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
- && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
- || ((TYPE_PRECISION (TREE_TYPE (op))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
+ && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
+ || !type_has_mode_precision_p (TREE_TYPE (op)))
/* But a conversion that does not change the bit-pattern is ok. */
&& !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
> TYPE_PRECISION (TREE_TYPE (op)))
@@ -4875,8 +4871,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
scalar_dest = gimple_assign_lhs (stmt);
vectype_out = STMT_VINFO_VECTYPE (stmt_info);
- if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -5264,8 +5259,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
/* Most operations cannot handle bit-precision types without extra
truncations. */
if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
- && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
/* Exception are bitwise binary operations. */
&& code != BIT_IOR_EXPR
&& code != BIT_XOR_EXPR
diff --git a/gcc/tree-vrp.c b/gcc/tree-vrp.c
index e3735ff..e1038a1 100644
--- a/gcc/tree-vrp.c
+++ b/gcc/tree-vrp.c
@@ -5247,7 +5247,7 @@ register_edge_assert_for_2 (tree name, edge e,
&& tree_fits_uhwi_p (cst2)
&& INTEGRAL_TYPE_P (TREE_TYPE (name2))
&& IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
- && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val))))
+ && type_has_mode_precision_p (TREE_TYPE (val)))
{
mask = wi::mask (tree_to_uhwi (cst2), false, prec);
val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
diff --git a/gcc/tree.h b/gcc/tree.h
index 47899ce..c855a4c 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -5414,4 +5414,13 @@ struct builtin_structptr_type
const char *str;
};
extern const builtin_structptr_type builtin_structptr_types[6];
+
+/* Return true if type T has the same precision as its underlying mode. */
+
+inline bool
+type_has_mode_precision_p (const_tree t)
+{
+ return TYPE_PRECISION (t) == GET_MODE_PRECISION (TYPE_MODE (t));
+}
+
#endif /* GCC_TREE_H */