about summary refs log tree commit diff
path: root/gcc/match.pd
diff options
context:
space:
mode:
authorAndrew Pinski <apinski@marvell.com>2021-05-22 19:49:50 +0000
committerAndrew Pinski <apinski@marvell.com>2021-05-26 00:46:31 +0000
commit1fd76b24306ed4df4cf9e797d900699ed59ce7f7 (patch)
tree21638bbbf36429bc99662ceafab24dc0cda85ae0 /gcc/match.pd
parentb6bdd7a4cb41ee057f2d064fffcb00f23ce6b497 (diff)
downloadgcc-1fd76b24306ed4df4cf9e797d900699ed59ce7f7.zip
gcc-1fd76b24306ed4df4cf9e797d900699ed59ce7f7.tar.gz
gcc-1fd76b24306ed4df4cf9e797d900699ed59ce7f7.tar.bz2
Optimize x < 0 ? ~y : y to (x >> 31) ^ y in match.pd
This copies the optimization that is done in phiopt for "x < 0 ? ~y : y to (x >> 31) ^ y" into match.pd. The code for phiopt is kept around until phiopt uses match.pd (which I am working towards). Note the original testcase is now optimized early on and I added a new testcase to optimize during phiopt. OK? Bootstrapped and tested on x86_64-linux-gnu with no regressions. Thanks, Andrew Pinski Differences from v1: V2: Add check for integral type to make sure vector types are not done. gcc: * match.pd (x < 0 ? ~y : y): New patterns. gcc/testsuite: * gcc.dg/tree-ssa/pr96928.c: Update test for slightly different IR. * gcc.dg/tree-ssa/pr96928-1.c: New testcase.
Diffstat (limited to 'gcc/match.pd')
-rw-r--r--  gcc/match.pd  32
1 files changed, 32 insertions, 0 deletions
diff --git a/gcc/match.pd b/gcc/match.pd
index ad6b057..dd73081 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -4875,6 +4875,38 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(cmp (bit_and@2 @0 integer_pow2p@1) @1)
(icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
+(for cmp (ge lt)
+/* x < 0 ? ~y : y into (x >> (prec-1)) ^ y. */
+/* x >= 0 ? ~y : y into ~((x >> (prec-1)) ^ y). */
+ (simplify
+ (cond (cmp @0 integer_zerop) (bit_not @1) @1)
+ (if (INTEGRAL_TYPE_P (type)
+ && INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && !TYPE_UNSIGNED (TREE_TYPE (@0))
+ && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
+ (with
+ {
+ tree shifter = build_int_cst (integer_type_node, TYPE_PRECISION (type) - 1);
+ }
+ (if (cmp == LT_EXPR)
+ (bit_xor (convert (rshift @0 {shifter;})) @1)
+ (bit_not (bit_xor (convert (rshift @0 {shifter;})) @1))))))
+/* x < 0 ? y : ~y into ~((x >> (prec-1)) ^ y). */
+/* x >= 0 ? y : ~y into (x >> (prec-1)) ^ y. */
+ (simplify
+ (cond (cmp @0 integer_zerop) @1 (bit_not @1))
+ (if (INTEGRAL_TYPE_P (type)
+ && INTEGRAL_TYPE_P (TREE_TYPE (@0))
+ && !TYPE_UNSIGNED (TREE_TYPE (@0))
+ && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
+ (with
+ {
+ tree shifter = build_int_cst (integer_type_node, TYPE_PRECISION (type) - 1);
+ }
+ (if (cmp == GE_EXPR)
+ (bit_xor (convert (rshift @0 {shifter;})) @1)
+ (bit_not (bit_xor (convert (rshift @0 {shifter;})) @1)))))))
+
/* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
convert this into a shift followed by ANDing with D. */
(simplify