author	Richard Biener <rguenther@suse.de>	2024-12-05 13:24:27 +0100
committer	Richard Biener <rguenth@gcc.gnu.org>	2024-12-06 10:29:08 +0100
commit	1c07f7a1b45b3d6f46cb25f3bf068d94fe4131e8 (patch)
tree	afb32d46b00c7e45a95a6101e34535c5f93fde12 /gcc
parent	feea589d78fd5ebe1c02cf937e184d2c66cd99ed (diff)
Remove some duplicates reported by genmatch
genmatch currently has difficulty deciding whether a duplicate structural
match is really a duplicate, as uses of captures within predicates or in
C code can be order dependent.  For example, a reported duplicate results in

  {
    tree captures[4] ATTRIBUTE_UNUSED = { _p1, _p0, _q20, _q21 };
    if (gimple_simplify_112 (res_op, seq, valueize, type, captures))
      return true;
  }
  {
    tree captures[4] ATTRIBUTE_UNUSED = { _p1, _p0, _q21, _q20 };
    if (gimple_simplify_112 (res_op, seq, valueize, type, captures))
      return true;
  }

where the only difference is that _q20 and _q21 are swapped, but that
results in a call to bitwise_inverted_equal_p (_p1, X) with X once _q20
and once _q21.  That is, we treat bare captures as equal when reporting
duplicates.  Because of bitwise_inverted_equal_p, a _lot_ of the
duplicates reported are meanwhile not actual duplicates.  The following
removes some that are, though, as the operands are only passed to
types_match.

	* match.pd (.SAT_ADD patterns using IFN_ADD_OVERFLOW): Remove
	:c that only causes duplicate patterns.
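To illustrate the distinction, here is a small, self-contained C sketch.  It is not genmatch output; symmetric_p and order_dependent_p are made-up stand-ins for types_match and bitwise_inverted_equal_p.  When the captures only feed a symmetric predicate, the two commuted capture orders behave identically and the generated blocks really are duplicates; when the predicate is order dependent, they are not.

  #include <stdbool.h>
  #include <stdio.h>

  /* Simplified stand-in for a symmetric check such as types_match:
     swapping its arguments can never change the result.  */
  static bool
  symmetric_p (int a, int b)
  {
    return a == b;
  }

  /* Simplified stand-in for an order-dependent check such as
     bitwise_inverted_equal_p (_p1, X): the result depends on which
     capture ends up as the second argument.  */
  static bool
  order_dependent_p (int a, int b)
  {
    return a == 2 * b;
  }

  int
  main (void)
  {
    int q20 = 10, q21 = 5;

    /* The two commuted capture orders, as genmatch emits for a :c match.  */
    printf ("symmetric:       %d %d\n",
	    symmetric_p (q20, q21), symmetric_p (q21, q20));
    printf ("order dependent: %d %d\n",
	    order_dependent_p (q20, q21), order_dependent_p (q21, q20));
    return 0;
  }

Run, the symmetric stand-in gives the same answer for both argument orders while the order-dependent one does not, which is why only patterns whose operands are consumed solely by types_match can safely drop :c.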
Diffstat (limited to 'gcc')
-rw-r--r--	gcc/match.pd	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/gcc/match.pd b/gcc/match.pd
index 650c3f4..d3aabae 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -3146,30 +3146,30 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(match (unsigned_integer_sat_add @0 @1)
/* SUM = ADD_OVERFLOW (X, Y)
SAT_U_ADD = REALPART (SUM) | -IMAGPART (SUM) */
- (bit_ior:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) (negate (imagpart @2)))
+ (bit_ior:c (realpart (IFN_ADD_OVERFLOW@2 @0 @1)) (negate (imagpart @2)))
(if (types_match (type, @0, @1))))
(match (unsigned_integer_sat_add @0 @1)
/* SUM = ADD_OVERFLOW (X, Y)
SAT_U_ADD = REALPART (SUM) | -(IMAGPART (SUM) != 0) */
- (bit_ior:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1))
+ (bit_ior:c (realpart (IFN_ADD_OVERFLOW@2 @0 @1))
(negate (convert (ne (imagpart @2) integer_zerop))))
(if (types_match (type, @0, @1))))
(match (unsigned_integer_sat_add @0 @1)
/* SUM = ADD_OVERFLOW (X, Y)
SAT_U_ADD = IMAGPART (SUM) == 0 ? REALPART (SUM) : -1 */
- (cond^ (eq (imagpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) integer_zerop)
+ (cond^ (eq (imagpart (IFN_ADD_OVERFLOW@2 @0 @1)) integer_zerop)
(realpart @2) integer_minus_onep)
(if (types_match (type, @0, @1))))
(match (unsigned_integer_sat_add @0 @1)
/* SUM = ADD_OVERFLOW (X, Y)
SAT_U_ADD = IMAGPART (SUM) != 0 ? -1 : REALPART (SUM) */
- (cond^ (ne (imagpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) integer_zerop)
+ (cond^ (ne (imagpart (IFN_ADD_OVERFLOW@2 @0 @1)) integer_zerop)
integer_minus_onep (realpart @2))
(if (types_match (type, @0, @1))))
(match (unsigned_integer_sat_add @0 @1)
/* SUM = ADD_OVERFLOW (X, IMM)
SAT_U_ADD = IMAGPART (SUM) != 0 ? -1 : REALPART (SUM) */
- (cond^ (ne (imagpart (IFN_ADD_OVERFLOW:c@2 @0 INTEGER_CST@1)) integer_zerop)
+ (cond^ (ne (imagpart (IFN_ADD_OVERFLOW@2 @0 INTEGER_CST@1)) integer_zerop)
integer_minus_onep (realpart @2))
(if (types_match (type, @0) && int_fits_type_p (@1, type)))))