author     Richard Biener <rguenther@suse.de>  2023-06-19 12:28:32 +0200
committer  Richard Biener <rguenther@suse.de>  2023-06-21 09:10:13 +0200
commit     24c125fe47ac95f9e83f7e2bfa8594592a76368f (patch)
tree       6f817e4881a088b28d4f55c057d6f8b335c93997 /gcc
parent     864c6471bdc6cdec6da60b66ac13e9fe3cd73fb8 (diff)
[i386] Reject too large vectors for partial vector vectorization
The following works around the lack of the x86 backend making the
vectorizer compare the costs of the different possible vector sizes the
backend advertises through the vector_modes hook.  When enabling masked
epilogues or main loops this means we will select the preferred vector
mode, which is usually the largest, even for loops that iterate far
fewer times than the vector has lanes.  When not using masking the
vectorizer would reject any mode resulting in a VF bigger than the
number of iterations, but with masking the excess lanes are simply
masked out.  So this overloads the finish_cost function and matches
the problematic case, forcing a high cost to make us try a smaller
vector size.

        * config/i386/i386.cc (ix86_vector_costs::finish_cost): Overload.
        For masked main loops make sure the vectorization factor isn't
        more than double the number of iterations.

        * gcc.target/i386/vect-partial-vectors-1.c: New testcase.
        * gcc.target/i386/vect-partial-vectors-2.c: Likewise.
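
In effect the check below compares base-2 logarithms: a masked main loop
is rejected when exact_log2 of the vectorization factor exceeds ceil_log2
of the known iteration count, i.e. when the VF is more than double the
trip count.  As a minimal standalone sketch of that arithmetic, the
helper functions and concrete VF/iteration values here are illustrative
stand-ins, not the vectorizer's own exact_log2/ceil_log2 or poly_int
machinery:

/* Illustrative stand-in for the finish_cost predicate; vf and niters
   play the roles of LOOP_VINFO_VECT_FACTOR and LOOP_VINFO_INT_NITERS.
   Assumes a power-of-two VF, as on x86.  */
#include <stdio.h>

static int log2_floor (unsigned x)   /* largest n with 2^n <= x */
{
  int n = 0;
  while (x >>= 1)
    n++;
  return n;
}

static int log2_ceil (unsigned x)    /* smallest n with 2^n >= x */
{
  return log2_floor (x) + ((x & (x - 1)) ? 1 : 0);
}

static int reject_mode_p (unsigned vf, unsigned niters)
{
  /* Mirrors: exact_log2 (VF) > ceil_log2 (NITERS).  */
  return log2_floor (vf) > log2_ceil (niters);
}

int main (void)
{
  printf ("VF 16 vs 4 iterations: %d\n", reject_mode_p (16, 4)); /* 1: zmm rejected */
  printf ("VF  8 vs 4 iterations: %d\n", reject_mode_p (8, 4));  /* 1: ymm rejected */
  printf ("VF 16 vs 7 iterations: %d\n", reject_mode_p (16, 7)); /* 1: zmm rejected */
  printf ("VF  8 vs 7 iterations: %d\n", reject_mode_p (8, 7));  /* 0: masked ymm kept */
  return 0;
}

On the numbers used by the new testcases this is why the 4-iteration
loop falls back to unmasked SSE (zmm and ymm both rejected, xmm fits
exactly) while the 7-iteration loop keeps a masked ymm body (zmm
rejected, ymm kept).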
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/i386/i386.cc                                  | 26
-rw-r--r--  gcc/testsuite/gcc.target/i386/vect-partial-vectors-1.c   | 13
-rw-r--r--  gcc/testsuite/gcc.target/i386/vect-partial-vectors-2.c   | 12
3 files changed, 51 insertions, 0 deletions
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index b20cb86..32851a5 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -23666,6 +23666,7 @@ class ix86_vector_costs : public vector_costs
stmt_vec_info stmt_info, slp_tree node,
tree vectype, int misalign,
vect_cost_model_location where) override;
+ void finish_cost (const vector_costs *) override;
};
/* Implement targetm.vectorize.create_costs. */
@@ -23918,6 +23919,31 @@ ix86_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
return retval;
}
+void
+ix86_vector_costs::finish_cost (const vector_costs *scalar_costs)
+{
+ loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (m_vinfo);
+ if (loop_vinfo && !m_costing_for_scalar)
+ {
+ /* We are currently not asking the vectorizer to compare costs
+ between different vector mode sizes. When using predication
+ that will end up always choosing the preferred mode size even
+ if there's a smaller mode covering all lanes. Test for this
+ situation and artificially reject the larger mode attempt.
+ ??? We currently lack masked ops for sub-SSE sized modes,
+ so we could restrict this rejection to AVX and AVX512 modes
+ but err on the safe side for now. */
+ if (LOOP_VINFO_USING_PARTIAL_VECTORS_P (loop_vinfo)
+ && !LOOP_VINFO_EPILOGUE_P (loop_vinfo)
+ && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
+ && (exact_log2 (LOOP_VINFO_VECT_FACTOR (loop_vinfo).to_constant ())
+ > ceil_log2 (LOOP_VINFO_INT_NITERS (loop_vinfo))))
+ m_costs[vect_body] = INT_MAX;
+ }
+
+ vector_costs::finish_cost (scalar_costs);
+}
+
/* Validate target specific memory model bits in VAL. */
static unsigned HOST_WIDE_INT
diff --git a/gcc/testsuite/gcc.target/i386/vect-partial-vectors-1.c b/gcc/testsuite/gcc.target/i386/vect-partial-vectors-1.c
new file mode 100644
index 0000000..3834720
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-partial-vectors-1.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx512f -mavx512vl -mprefer-vector-width=512 --param vect-partial-vector-usage=1" } */
+
+void foo (int * __restrict a, int *b)
+{
+ for (int i = 0; i < 4; ++i)
+ a[i] = b[i] + 42;
+}
+
+/* We do not want to optimize this using masked AVX or AVX512
+ but unmasked SSE. */
+/* { dg-final { scan-assembler-not "\[yz\]mm" } } */
+/* { dg-final { scan-assembler "xmm" } } */
diff --git a/gcc/testsuite/gcc.target/i386/vect-partial-vectors-2.c b/gcc/testsuite/gcc.target/i386/vect-partial-vectors-2.c
new file mode 100644
index 0000000..4ab2cbc
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-partial-vectors-2.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx512f -mavx512vl -mprefer-vector-width=512 --param vect-partial-vector-usage=1" } */
+
+void foo (int * __restrict a, int *b)
+{
+ for (int i = 0; i < 7; ++i)
+ a[i] = b[i] + 42;
+}
+
+/* We want to optimize this using masked AVX, not AVX512 or SSE. */
+/* { dg-final { scan-assembler-not "zmm" } } */
+/* { dg-final { scan-assembler "ymm\[^\r\n\]*\{%k" } } */