author     Marc Glisse <marc.glisse@inria.fr>   2020-08-07 18:49:04 +0200
committer  Marc Glisse <marc.glisse@inria.fr>   2020-08-07 18:49:04 +0200
commit     a1ee6d507b0c26466be519d177f5a08b22f63647 (patch)
tree       5da578f99b7f1a468969a38129f9f4a345353e05 /gcc/gimple-match-head.c
parent     6c3ae88d1e13b71665d1b27821159dcbea410267 (diff)
Disable some VEC_COND_EXPR transformations after vector lowering
ARM understands VEC_COND_EXPR<v == w, -1, 0> but not a plain v == w that is
fed to something other than a VEC_COND_EXPR (say a BIT_IOR_EXPR). This patch
avoids introducing the second kind of statement after the vector lowering
pass, which is the last chance to turn v == w back into something the target
handles.

This is just a workaround to avoid ICEs; a v == w produced before vector
lowering will still yield pretty bad code. Either the arm target needs to
learn to handle vector comparisons (aarch64 already does), or the middle end
needs to fall back to vcond when plain comparisons are not supported (or ...).

2020-08-07  Marc Glisse  <marc.glisse@inria.fr>

	* generic-match-head.c (optimize_vectors_before_lowering_p): New
	function.
	* gimple-match-head.c (optimize_vectors_before_lowering_p):
	Likewise.
	* match.pd ((v ? w : 0) ? a : b, c1 ? c2 ? a : b : b): Use it.
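To make the failure mode concrete, here is a hedged sketch (an invented
example, not the patch's own testcase) using GCC's C vector extensions.
Each comparison yields a -1/0 mask vector, and OR-ing two masks is exactly
the situation where a bare vector comparison ends up feeding a BIT_IOR_EXPR
instead of a VEC_COND_EXPR:

/* Illustrative only; the function name and shape are assumptions.
   With GCC vector extensions, a == b produces a -1/0 mask vector, so
   each comparison result here feeds a BIT_IOR_EXPR rather than a
   VEC_COND_EXPR, the form the arm backend could not expand.  */
typedef int v4si __attribute__ ((vector_size (16)));

v4si
f (v4si a, v4si b, v4si c, v4si d)
{
  return (a == b) | (c == d);
}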
Diffstat (limited to 'gcc/gimple-match-head.c')
-rw-r--r--  gcc/gimple-match-head.c | 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/gcc/gimple-match-head.c b/gcc/gimple-match-head.c
index 9b3e729..4a65be7 100644
--- a/gcc/gimple-match-head.c
+++ b/gcc/gimple-match-head.c
@@ -1158,6 +1158,16 @@ canonicalize_math_after_vectorization_p ()
   return !cfun || (cfun->curr_properties & PROP_gimple_lvec) != 0;
 }
 
+/* Return true if we can still perform transformations that may introduce
+ vector operations that are not supported by the target. Vector lowering
+ normally handles those, but after that pass, it becomes unsafe. */
+
+static inline bool
+optimize_vectors_before_lowering_p ()
+{
+ return !cfun || (cfun->curr_properties & PROP_gimple_lvec) == 0;
+}
+
 /* Return true if pow(cst, x) should be optimized into exp(log(cst) * x).
    As a workaround for SPEC CPU2017 628.pop2_s, don't do it if arg0
    is an exact integer, arg1 = phi_res +/- cst1 and phi_res = PHI <cst2, ...>
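The ChangeLog entry shows the new predicate guarding the (v ? w : 0) ? a : b
and c1 ? c2 ? a : b : b simplifications in match.pd. As a rough illustration
of the mechanics only, here is a standalone model of the property check; the
PROP_gimple_lvec value and the struct are simplified assumptions, not GCC
internals:

/* Standalone model (all names and values here are illustrative, not
   copied from GCC).  PROP_gimple_lvec stands in for the property bit
   that the vector lowering pass sets on the current function.  */
#include <stdio.h>

#define PROP_gimple_lvec (1u << 6)  /* placeholder value */

struct function { unsigned curr_properties; };

/* Mirrors the logic of the new predicate: true while lowering has not
   run yet, or when there is no current function at all.  */
static int
optimize_vectors_before_lowering_p (const struct function *fun)
{
  return !fun || (fun->curr_properties & PROP_gimple_lvec) == 0;
}

int
main (void)
{
  struct function before = { 0 };
  struct function after  = { PROP_gimple_lvec };

  printf ("%d\n", optimize_vectors_before_lowering_p (&before)); /* 1: safe */
  printf ("%d\n", optimize_vectors_before_lowering_p (&after));  /* 0: unsafe */
  printf ("%d\n", optimize_vectors_before_lowering_p (0));       /* 1: no cfun */
  return 0;
}

Note the deliberate asymmetry with canonicalize_math_after_vectorization_p
in the context above: when there is no current function, both predicates
return true, so folding outside any function body is restricted by neither
check.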