author     Jakub Jelinek <jakub@redhat.com>    2023-05-21 13:36:56 +0200
committer  Jakub Jelinek <jakub@redhat.com>    2023-05-21 13:36:56 +0200
commit     f211757f6fa9515e3fd1a4f66f1a8b48e500c9de (patch)
tree       c0dcf24b10972c6e7d046b96bb418b1556ede9fa /gcc
parent     7f3df8e65c71e5df01fe7fe7de577bb9ff48f37b (diff)
match.pd: Ensure (op CONSTANT_CLASS_P CONSTANT_CLASS_P) is simplified [PR109505]

On the following testcase we hang, because POLY_INT_CST is CONSTANT_CLASS_P, but BIT_AND_EXPR of it and an INTEGER_CST doesn't simplify, and the
  (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2)
simplification actually relies on the (CST1 & CST2) part simplifying; otherwise it is a deoptimization, trading 2 ops for 3, and it furthermore runs into the
  /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
     operands are another bit-wise operation with a common input.  If so,
     distribute the bit operations to save an operation and possibly two if
     constants are involved.  For example, convert
       (A | B) & (A | C) into A | (B & C)
     Further simplification will occur if B and C are constants.  */
simplification, which rewrites the resulting (x & CST2) | (CST1 & CST2) back to CST2 & (x | CST1).

I went through all the other places I could find where a simplification takes 2 CONSTANT_CLASS_P operands and performs some operation on them.  While those spots aren't as severe (they just trade 2 operations for another 2 if the two constants don't simplify, rather than trading 2 ops for 3 as in the case above), I still think all of them really intend to optimize only if the 2 constants simplify.

So, the following patch adds the ! modifier to those spots to ensure that; even at GENERIC that modifier means !EXPR_P, which is exactly what we want IMHO.

2023-05-21  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/109505
	* match.pd ((x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2),
	Combine successive equal operations with constants,
	(A +- CST1) +- CST2 -> A + CST3,
	(CST1 - A) +- CST2 -> CST3 - A,
	CST1 - (CST2 - A) -> CST3 + A): Use ! on ops with 2
	CONSTANT_CLASS_P operands.
	* gcc.target/aarch64/sve/pr109505.c: New test.
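(Illustrative sketch only, not part of the commit; the constants 0xf0 and 0x0f are made up for the example.)  With two ordinary INTEGER_CSTs the inner (CST1 & CST2) folds and the transform is a clear win, whereas with an SVE POLY_INT_CST such as svcntb () it does not fold, which is exactly the case the ! modifier now rejects:

  unsigned long
  folds_nicely (unsigned long x)
  {
    /* (x | 0xf0) & 0x0f -> (x & 0x0f) | (0xf0 & 0x0f) -> x & 0x0f,
       because 0xf0 & 0x0f folds to 0.  */
    return (x | 0xf0) & 0x0f;
  }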
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/match.pd                                     | 20
-rw-r--r--  gcc/testsuite/gcc.target/aarch64/sve/pr109505.c  | 12
2 files changed, 22 insertions(+), 10 deletions(-)
diff --git a/gcc/match.pd b/gcc/match.pd
index 30ffdfc..1fe0559 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -1916,7 +1916,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
(bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
- (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
+ (bit_ior (bit_and @0 @2) (bit_and! @1 @2)))
/* Combine successive equal operations with constants. */
(for bitop (bit_and bit_ior bit_xor)
@@ -1925,7 +1925,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(if (!CONSTANT_CLASS_P (@0))
/* This is the canonical form regardless of whether (bitop @1 @2) can be
folded to a constant. */
- (bitop @0 (bitop @1 @2))
+ (bitop @0 (bitop! @1 @2))
/* In this case we have three constants and (bitop @0 @1) doesn't fold
to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
the values involved are such that the operation can't be decided at
@@ -2998,13 +2998,13 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
forever if something doesn't simplify into a constant. */
(if (!CONSTANT_CLASS_P (@0))
(if (outer_op == PLUS_EXPR)
- (plus (view_convert @0) (inner_op @2 (view_convert @1)))
- (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
+ (plus (view_convert @0) (inner_op! @2 (view_convert @1)))
+ (minus (view_convert @0) (neg_inner_op! @2 (view_convert @1)))))
(if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
(if (outer_op == PLUS_EXPR)
- (view_convert (plus @0 (inner_op (view_convert @2) @1)))
- (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
+ (view_convert (plus @0 (inner_op! (view_convert @2) @1)))
+ (view_convert (minus @0 (neg_inner_op! (view_convert @2) @1))))
/* If the constant operation overflows we cannot do the transform
directly as we would introduce undefined overflow, for example
with (a - 1) + INT_MIN. */
@@ -3035,10 +3035,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
forever if something doesn't simplify into a constant. */
(if (!CONSTANT_CLASS_P (@0))
- (minus (outer_op (view_convert @1) @2) (view_convert @0)))
+ (minus (outer_op! (view_convert @1) @2) (view_convert @0)))
(if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
- (view_convert (minus (outer_op @1 (view_convert @2)) @0))
+ (view_convert (minus (outer_op! @1 (view_convert @2)) @0))
(if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type))
(with { tree cst = const_binop (outer_op, type, @1, @2); }
(if (cst && !TREE_OVERFLOW (cst))
@@ -3054,10 +3054,10 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
/* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
forever if something doesn't simplify into a constant. */
(if (!CONSTANT_CLASS_P (@0))
- (plus (view_convert @0) (minus @1 (view_convert @2))))
+ (plus (view_convert @0) (minus! @1 (view_convert @2))))
(if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
|| TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
- (view_convert (plus @0 (minus (view_convert @1) @2)))
+ (view_convert (plus @0 (minus! (view_convert @1) @2)))
(if (types_match (type, @0) && !TYPE_OVERFLOW_SANITIZED (type))
(with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
(if (cst && !TREE_OVERFLOW (cst))
diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pr109505.c b/gcc/testsuite/gcc.target/aarch64/sve/pr109505.c
new file mode 100644
index 0000000..b975ae7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/pr109505.c
@@ -0,0 +1,12 @@
+/* PR tree-optimization/109505 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=armv8.2-a+sve" } */
+
+#pragma GCC aarch64 "arm_sve.h"
+
+unsigned long
+foo (unsigned long x)
+{
+ unsigned long y = svcntb ();
+ return (x | 15) & y;
+}