Diffstat (limited to 'gcc/match.pd')
-rw-r--r--  gcc/match.pd | 103
1 file changed, 87 insertions(+), 16 deletions(-)
diff --git a/gcc/match.pd b/gcc/match.pd
index ec2f560..82e6e29 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -3595,23 +3595,34 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
return (T)x;
}
while WT is uint128_t, T is uint8_t, uint16_t, uint32_t or uint64_t. */
- (convert@4 (min (widen_mult:c@3 (convert@5 (convert @0))
- (convert@6 (convert @1)))
+ (convert (min (widen_mult:c@3 (convert@4 (convert @0))
+ (convert@5 (convert @1)))
INTEGER_CST@2))
- (if (types_match (type, @0, @1) && types_match (type, @4))
+ (if (types_match (type, @0, @1))
(with
{
unsigned prec = TYPE_PRECISION (type);
unsigned widen_prec = TYPE_PRECISION (TREE_TYPE (@3));
+ unsigned cvt4_prec = TYPE_PRECISION (TREE_TYPE (@4));
unsigned cvt5_prec = TYPE_PRECISION (TREE_TYPE (@5));
- unsigned cvt6_prec = TYPE_PRECISION (TREE_TYPE (@6));
- unsigned hw_int_prec = sizeof (HOST_WIDE_INT) * 8;
wide_int c2 = wi::to_wide (@2);
wide_int max = wi::mask (prec, false, widen_prec);
bool c2_is_max_p = wi::eq_p (c2, max);
- bool widen_mult_p = cvt5_prec == cvt6_prec && hw_int_prec == cvt5_prec;
+ bool widen_mult_p = cvt4_prec == cvt5_prec && widen_prec == cvt5_prec * 2;
}
(if (widen_prec > prec && c2_is_max_p && widen_mult_p)))))
+ (match (unsigned_integer_sat_mul @0 @1)
+ (convert (min (mult:c@3 (convert @0) (convert @1)) INTEGER_CST@2))
+ (if (types_match (type, @0, @1))
+ (with
+ {
+ unsigned prec = TYPE_PRECISION (type);
+ unsigned widen_prec = TYPE_PRECISION (TREE_TYPE (@3));
+ wide_int c2 = wi::to_wide (@2);
+ wide_int max = wi::mask (prec, false, widen_prec);
+ bool c2_is_max_p = wi::eq_p (c2, max);
+ }
+ (if (widen_prec > prec && c2_is_max_p)))))
)
/* The boundary condition for case 10: IMM = 1:
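(As a scalar illustration of the saturating-multiply idiom the two unsigned_integer_sat_mul
matches above recognize; the function name and the uint16_t/uint8_t pair are only an
example, the comment in the hunk uses uint128_t for WT:)

  #include <stdint.h>

  static inline uint8_t
  sat_mul_u8 (uint8_t a, uint8_t b)
  {
    /* Multiply in a wider type, clamp to the narrow type's maximum,
       then truncate back.  The first match covers the widen_mult form,
       the newly added one the plain mult already done in the wide type.  */
    uint16_t w = (uint16_t) a * (uint16_t) b;
    return (uint8_t) (w < UINT8_MAX ? w : UINT8_MAX);
  }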
@@ -3716,6 +3727,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if ((TREE_CODE (@1) == INTEGER_CST
&& TREE_CODE (@2) == INTEGER_CST)
|| ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
+ || (VECTOR_TYPE_P (TREE_TYPE (@1))
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, code2))
|| POINTER_TYPE_P (TREE_TYPE (@1)))
&& bitwise_equal_p (@1, @2)))
(with
@@ -3754,27 +3767,39 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (code1 == EQ_EXPR && val) @3)
(if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
(if (code1 == NE_EXPR && !val && allbits) @4)
- (if (code1 == NE_EXPR
+ (if ((code1 == NE_EXPR
&& code2 == GE_EXPR
&& cmp == 0
&& allbits)
+ && ((VECTOR_BOOLEAN_TYPE_P (type)
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, GT_EXPR))
+ || !VECTOR_TYPE_P (TREE_TYPE (@1))))
(gt @c0 (convert @1)))
- (if (code1 == NE_EXPR
+ (if ((code1 == NE_EXPR
&& code2 == LE_EXPR
&& cmp == 0
&& allbits)
+ && ((VECTOR_BOOLEAN_TYPE_P (type)
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, LT_EXPR))
+ || !VECTOR_TYPE_P (TREE_TYPE (@1))))
(lt @c0 (convert @1)))
/* (a != (b+1)) & (a > b) -> a > (b+1) */
- (if (code1 == NE_EXPR
+ (if ((code1 == NE_EXPR
&& code2 == GT_EXPR
&& one_after
&& allbits)
+ && ((VECTOR_BOOLEAN_TYPE_P (type)
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, GT_EXPR))
+ || !VECTOR_TYPE_P (TREE_TYPE (@1))))
(gt @c0 (convert @1)))
/* (a != (b-1)) & (a < b) -> a < (b-1) */
- (if (code1 == NE_EXPR
+ (if ((code1 == NE_EXPR
&& code2 == LT_EXPR
&& one_before
&& allbits)
+ && ((VECTOR_BOOLEAN_TYPE_P (type)
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, LT_EXPR))
+ || !VECTOR_TYPE_P (TREE_TYPE (@1))))
(lt @c0 (convert @1)))
)
)
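(A scalar sketch of the kind of combine being gated above; the new
expand_vec_cmp_expr_p conditions only decide whether the same rewrite may be
applied when a and b are vectors and the result is a vector mask.  Function
names are made up, and b + 1 stands in for the INTEGER_CST case:)

  /* (a != (b+1)) & (a > b)  ->  a > (b+1)  */
  int before (int a, int b) { return (a != b + 1) && (a > b); }
  int after  (int a, int b) { return a > b + 1; }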
@@ -3793,6 +3818,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if ((TREE_CODE (@1) == INTEGER_CST
&& TREE_CODE (@2) == INTEGER_CST)
|| ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
+ || (VECTOR_TYPE_P (TREE_TYPE (@1))
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, code2))
|| POINTER_TYPE_P (TREE_TYPE (@1)))
&& operand_equal_p (@1, @2)))
(with
@@ -3843,6 +3870,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if ((TREE_CODE (@1) == INTEGER_CST
&& TREE_CODE (@2) == INTEGER_CST)
|| ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
+ || (VECTOR_TYPE_P (TREE_TYPE (@1)))
|| POINTER_TYPE_P (TREE_TYPE (@1)))
&& bitwise_equal_p (@1, @2)))
(with
@@ -3884,24 +3912,36 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (code1 == EQ_EXPR
&& code2 == GT_EXPR
&& cmp == 0
- && allbits)
+ && allbits
+ && ((VECTOR_BOOLEAN_TYPE_P (type)
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, GE_EXPR))
+ || !VECTOR_TYPE_P (TREE_TYPE (@1))))
(ge @c0 @2))
(if (code1 == EQ_EXPR
&& code2 == LT_EXPR
&& cmp == 0
- && allbits)
+ && allbits
+ && ((VECTOR_BOOLEAN_TYPE_P (type)
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, LE_EXPR))
+ || !VECTOR_TYPE_P (TREE_TYPE (@1))))
(le @c0 @2))
/* (a == (b-1)) | (a >= b) -> a >= (b-1) */
(if (code1 == EQ_EXPR
&& code2 == GE_EXPR
&& one_before
- && allbits)
+ && allbits
+ && ((VECTOR_BOOLEAN_TYPE_P (type)
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, GE_EXPR))
+ || !VECTOR_TYPE_P (TREE_TYPE (@1))))
(ge @c0 (convert @1)))
/* (a == (b+1)) | (a <= b) -> a <= (b+1) */
(if (code1 == EQ_EXPR
&& code2 == LE_EXPR
&& one_after
- && allbits)
+ && allbits
+ && ((VECTOR_BOOLEAN_TYPE_P (type)
+ && expand_vec_cmp_expr_p (TREE_TYPE (@1), type, LE_EXPR))
+ || !VECTOR_TYPE_P (TREE_TYPE (@1))))
(le @c0 (convert @1)))
)
)
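(The bit_ior counterpart, again as a scalar sketch with made-up function
names; the vector guards above allow the same single-comparison rewrite only
when the target can expand the resulting GE/LE on the vector type:)

  /* (a == (b-1)) | (a >= b)  ->  a >= (b-1)  */
  int before (int a, int b) { return (a == b - 1) || (a >= b); }
  int after  (int a, int b) { return a >= b - 1; }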
@@ -3966,7 +4006,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
rcmp (ne le gt ne lt ge)
(simplify
(op:c (cmp1:c @0 @1) (cmp2 @0 @1))
- (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
+ (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ || POINTER_TYPE_P (TREE_TYPE (@0))
+ || (VECTOR_TYPE_P (TREE_TYPE (@1))
+ && expand_vec_cmp_expr_p (TREE_TYPE (@0), type, rcmp)))
(rcmp @0 @1)))))
/* Optimize (a CMP b) == (a CMP b) */
@@ -3975,7 +4018,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
rcmp (eq gt le eq ge lt)
(simplify
(eq:c (cmp1:c @0 @1) (cmp2 @0 @1))
- (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
+ (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ || POINTER_TYPE_P (TREE_TYPE (@0))
+ || (VECTOR_TYPE_P (TREE_TYPE (@0))
+ && expand_vec_cmp_expr_p (TREE_TYPE (@0), type, rcmp)))
(rcmp @0 @1))))
/* (type)([0,1]@a != 0) -> (type)a
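(For the two rcmp tables above, one cmp1/cmp2 pairing as a scalar sketch;
the cmp1/cmp2 for-lists themselves sit outside this hunk, so the exact
pairing shown is an assumption based on the rcmp rows:)

  int xor_form (int a, int b) { return (a < b) ^ (a > b); }   /* -> a != b */
  int eq_form  (int a, int b) { return (a < b) == (a > b); }  /* -> a == b */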
@@ -5981,6 +6027,15 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
&& !expand_vec_cond_expr_p (TREE_TYPE (@1), TREE_TYPE (@0)))))
(vec_cond @0 (op! @1 @3) (op! @2 @4))))
+/* (@0 ? @2 : @3) lop (@1 ? @2 : @3) --> (@0 lop @1) ? @2 : @3. */
+(for lop (bit_and bit_ior bit_xor)
+ (simplify
+ (lop
+ (vec_cond @0 integer_minus_onep@2 integer_zerop@3)
+ (vec_cond @1 @2 @3))
+ (if (target_supports_op_p (TREE_TYPE (@0), lop, optab_vector))
+ (vec_cond (lop @0 @1) @2 @3))))
+
/* (c ? a : b) op d --> c ? (a op d) : (b op d) */
(simplify
(op (vec_cond:s @0 @1 @2) @3)
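(A sketch, using GCC vector extensions, of code that tends to produce the two
VEC_COND_EXPRs merged by the new pattern; the v4si typedef and the function
name are illustrative only:)

  typedef int v4si __attribute__ ((vector_size (16)));

  /* Each lanewise comparison is materialized as
     VEC_COND_EXPR <mask, { -1, ... }, { 0, ... }>; the pattern above ANDs
     the two masks first and keeps a single select, provided the target
     supports the mask bit_and (the target_supports_op_p check).  */
  v4si
  both_gt (v4si a, v4si b, v4si c)
  {
    return (a > c) & (b > c);
  }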
@@ -6992,6 +7047,22 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(icmp @0 @1)
(if (ic == ncmp)
(ncmp @0 @1)))))
+ /* ((cast)cmp) - 1 -> -(cast)icmp . */
+ (simplify
+ (plus (convert? (cmp@2 @0 @1)) integer_minus_onep)
+ (if (TYPE_PRECISION (type) > 1
+ && INTEGRAL_TYPE_P (TREE_TYPE (@2)) && TYPE_PRECISION (TREE_TYPE (@2)) == 1)
+ /* Comparison inversion may be impossible for trapping math,
+ invert_tree_comparison will tell us. But we can't use
+ a computed operator in the replacement tree thus we have
+ to play the trick below. */
+ (with { enum tree_code ic = invert_tree_comparison
+ (cmp, HONOR_NANS (@0));
+ tree cmptype = TREE_TYPE (@2); }
+ (if (ic == icmp)
+ (negate (convert (icmp:cmptype @0 @1)))
+ (if (ic == ncmp)
+ (negate (convert (ncmp:cmptype @0 @1))))))))
/* The following bits are handled by fold_binary_op_with_conditional_arg. */
(simplify
(ne (cmp@2 @0 @1) integer_zerop)
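(A minimal sketch of the new ((cast)cmp) - 1 -> -(cast)icmp rewrite for the
integer case; the _Bool temporary mirrors the one-bit TREE_TYPE (@2)
requirement, and the function names are made up.  For floating-point operands
the HONOR_NANS / invert_tree_comparison guard above decides whether the
inverted comparison is usable at all:)

  int before (int a, int b) { _Bool c = a < b; return (int) c - 1; }
  int after  (int a, int b) { return -(int) (a >= b); }
  /* a <  b:  1 - 1 == 0   and  -(0) == 0
     a >= b:  0 - 1 == -1  and  -(1) == -1  */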