author    Marc Glisse <marc.glisse@inria.fr>    2020-08-07 18:49:04 +0200
committer Marc Glisse <marc.glisse@inria.fr>    2020-08-07 18:49:04 +0200
commit a1ee6d507b0c26466be519d177f5a08b22f63647 (patch)
tree   5da578f99b7f1a468969a38129f9f4a345353e05 /gcc
parent 6c3ae88d1e13b71665d1b27821159dcbea410267 (diff)
Disable some VEC_COND_EXPR transformations after vector lowering
ARM understands VEC_COND_EXPR<v == w, -1, 0> but not a plain v == w that is
fed to something other than a VEC_COND_EXPR (say a BIT_IOR_EXPR). This patch
avoids introducing the second kind of statement after the vector lowering
pass, which is the last chance to turn v == w back into something the target
handles. This is just a workaround to avoid ICEs; a v == w produced before
vector lowering will still yield pretty bad code. Either the arm target needs
to learn to handle vector comparisons (aarch64 already does), or the
middle-end needs to fall back to vcond when plain comparisons are not
supported (or ...).

2020-08-07  Marc Glisse  <marc.glisse@inria.fr>

	* generic-match-head.c (optimize_vectors_before_lowering_p): New
	function.
	* gimple-match-head.c (optimize_vectors_before_lowering_p): Likewise.
	* match.pd ((v ? w : 0) ? a : b, c1 ? c2 ? a : b : b): Use it.
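For illustration only, a hypothetical reduced testcase (an assumption, not
taken from the commit): when a loop like the one below is if-converted and
vectorized for arm/NEON, the combined condition can end up as nested vector
selects of the c1 ? (c2 ? a : b) : b shape that the match.pd patterns in this
patch fold; performing that fold after vector lowering would leave a bare
vector comparison feeding a BIT_AND_EXPR/BIT_IOR_EXPR, which the arm backend
cannot expand.

/* Hypothetical reduced example (assumption, not the testcase from the
   commit): the combined condition below is one plausible source of the
   nested VEC_COND_EXPRs that the match.pd patterns fold.  */
void
f (int *restrict r, const int *restrict a, const int *restrict b,
   const int *restrict c, const int *restrict d, int n)
{
  for (int i = 0; i < n; i++)
    r[i] = (a[i] < b[i] && c[i] < d[i]) ? a[i] : b[i];
}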
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/generic-match-head.c  10
-rw-r--r--  gcc/gimple-match-head.c   10
-rw-r--r--  gcc/match.pd              20
3 files changed, 31 insertions, 9 deletions
diff --git a/gcc/generic-match-head.c b/gcc/generic-match-head.c
index 2454baa..fdb528d 100644
--- a/gcc/generic-match-head.c
+++ b/gcc/generic-match-head.c
@@ -80,6 +80,16 @@ canonicalize_math_after_vectorization_p ()
return false;
}
+/* Return true if we can still perform transformations that may introduce
+ vector operations that are not supported by the target. Vector lowering
+ normally handles those, but after that pass, it becomes unsafe. */
+
+static inline bool
+optimize_vectors_before_lowering_p ()
+{
+ return true;
+}
+
/* Return true if successive divisions can be optimized.
Defer to GIMPLE opts.  */
diff --git a/gcc/gimple-match-head.c b/gcc/gimple-match-head.c
index 9b3e729..4a65be7 100644
--- a/gcc/gimple-match-head.c
+++ b/gcc/gimple-match-head.c
@@ -1158,6 +1158,16 @@ canonicalize_math_after_vectorization_p ()
return !cfun || (cfun->curr_properties & PROP_gimple_lvec) != 0;
}
+/* Return true if we can still perform transformations that may introduce
+ vector operations that are not supported by the target. Vector lowering
+ normally handles those, but after that pass, it becomes unsafe. */
+
+static inline bool
+optimize_vectors_before_lowering_p ()
+{
+ return !cfun || (cfun->curr_properties & PROP_gimple_lvec) == 0;
+}
+
/* Return true if pow(cst, x) should be optimized into exp(log(cst) * x).
As a workaround for SPEC CPU2017 628.pop2_s, don't do it if arg0
is an exact integer, arg1 = phi_res +/- cst1 and phi_res = PHI <cst2, ...>
diff --git a/gcc/match.pd b/gcc/match.pd
index d8e3927..7e5c5a6 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -3461,40 +3461,42 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
(vec_cond @0 (op! @3 @1) (op! @3 @2))))
#endif
-/* (v ? w : 0) ? a : b is just (v & w) ? a : b */
+/* (v ? w : 0) ? a : b is just (v & w) ? a : b
+ Currently disabled after pass lvec because ARM understands
+ VEC_COND_EXPR<v==w,-1,0> but not a plain v==w fed to BIT_IOR_EXPR. */
(simplify
(vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_and @0 @3) @1 @2)))
(simplify
(vec_cond (vec_cond:s @0 integer_all_onesp @3) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_ior @0 @3) @1 @2)))
(simplify
(vec_cond (vec_cond:s @0 integer_zerop @3) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_ior @0 (bit_not @3)) @2 @1)))
(simplify
(vec_cond (vec_cond:s @0 @3 integer_all_onesp) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
(vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
/* c1 ? c2 ? a : b : b --> (c1 & c2) ? a : b */
(simplify
(vec_cond @0 (vec_cond:s @1 @2 @3) @3)
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_and @0 @1) @2 @3)))
(simplify
(vec_cond @0 @2 (vec_cond:s @1 @2 @3))
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_ior @0 @1) @2 @3)))
(simplify
(vec_cond @0 (vec_cond:s @1 @2 @3) @2)
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_ior (bit_not @0) @1) @2 @3)))
(simplify
(vec_cond @0 @3 (vec_cond:s @1 @2 @3))
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
(vec_cond (bit_and (bit_not @0) @1) @2 @3)))
/* Simplification moved from fold_cond_expr_with_comparison. It may also