path: root/gcc/match.pd
author    Marc Glisse <marc.glisse@inria.fr>  2020-08-07 18:49:04 +0200
committer Marc Glisse <marc.glisse@inria.fr>  2020-08-07 18:49:04 +0200
commit    a1ee6d507b0c26466be519d177f5a08b22f63647 (patch)
tree      5da578f99b7f1a468969a38129f9f4a345353e05 /gcc/match.pd
parent    6c3ae88d1e13b71665d1b27821159dcbea410267 (diff)
Disable some VEC_COND_EXPR transformations after vector lowering
ARM understands VEC_COND_EXPR<v == w, -1, 0> but not a plain v == w
which is fed to something other than VEC_COND_EXPR (say BIT_IOR_EXPR).
This patch avoids introducing the second kind of statement after the
vector lowering pass, which is the last chance to turn v == w back into
something the target handles.

This is just a workaround to avoid ICEs; a v == w produced before
vector lowering will yield pretty bad code. Either the arm target needs
to learn to handle vector comparisons (aarch64 already does), or the
middle-end needs to fall back to vcond when plain comparisons are not
supported (or ...).

2020-08-07  Marc Glisse  <marc.glisse@inria.fr>

	* generic-match-head.c (optimize_vectors_before_lowering_p): New
	function.
	* gimple-match-head.c (optimize_vectors_before_lowering_p):
	Likewise.
	* match.pd ((v ? w : 0) ? a : b, c1 ? c2 ? a : b : b): Use it.
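[Editor's illustration, not part of the commit] A minimal sketch of the kind of input that exercises these folds, assuming GNU vector extensions and the C++ spelling of the vector ?: operator; the function name and operand names are made up for the example:

/* The "c1 ? c2 ? a : b : b" rule below would fuse the nested selects
   into one select over (a == b) & (c == d), i.e. a plain vector
   comparison feeding BIT_AND_EXPR.  If that fold fires after the
   vector lowering pass, arm has no way left to expand it and the
   compiler ICEs, hence the new guard.  */
typedef int v4si __attribute__ ((vector_size (16)));

v4si
f (v4si a, v4si b, v4si c, v4si d, v4si e, v4si g)
{
  return (a == b) ? ((c == d) ? e : g) : g;
}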
Diffstat (limited to 'gcc/match.pd')
-rw-r--r--  gcc/match.pd  20
1 file changed, 11 insertions(+), 9 deletions(-)
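[Editor's illustration] The new predicate called in the hunks below is defined in gimple-match-head.c (with a trivial always-true variant in generic-match-head.c), neither of which is part of this diff. A sketch of the GIMPLE variant, assuming from the commit message that it simply tests whether the vector lowering pass has run yet:

/* Return true while the vector lowering pass (PROP_gimple_lvec) has not
   yet run on the current function, i.e. while it is still safe to
   introduce vector comparisons the target may not handle directly.  */
static inline bool
optimize_vectors_before_lowering_p ()
{
  return cfun && (cfun->curr_properties & PROP_gimple_lvec) == 0;
}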
diff --git a/gcc/match.pd b/gcc/match.pd
index d8e3927..7e5c5a6 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -3461,40 +3461,42 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(vec_cond @0 (op! @3 @1) (op! @3 @2))))
#endif
-/* (v ? w : 0) ? a : b is just (v & w) ? a : b */
+/* (v ? w : 0) ? a : b is just (v & w) ? a : b
+ Currently disabled after pass lvec because ARM understands
+ VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to BIT_IOR_EXPR. */
(simplify
(vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_and @0 @3) @1 @2)))
(simplify
(vec_cond (vec_cond:s @0 integer_all_onesp @3) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_ior @0 @3) @1 @2)))
(simplify
(vec_cond (vec_cond:s @0 integer_zerop @3) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_ior @0 (bit_not @3)) @2 @1)))
(simplify
(vec_cond (vec_cond:s @0 @3 integer_all_onesp) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
/* c1 ? c2 ? a : b : b --> (c1 & c2) ? a : b */
(simplify
(vec_cond @0 (vec_cond:s @1 @2 @3) @3)
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_and @0 @1) @2 @3)))
(simplify
(vec_cond @0 @2 (vec_cond:s @1 @2 @3))
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_ior @0 @1) @2 @3)))
(simplify
(vec_cond @0 (vec_cond:s @1 @2 @3) @2)
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_ior (bit_not @0) @1) @2 @3)))
(simplify
(vec_cond @0 @3 (vec_cond:s @1 @2 @3))
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_and (bit_not @0) @1) @2 @3)))
/* Simplification moved from fold_cond_expr_with_comparison. It may also