author     Cui,Lili <lili.cui@intel.com>      2022-06-08 11:25:57 +0800
committer  liuhongt <hongtao.liu@intel.com>   2022-06-09 14:59:44 +0800
commit     269edf4e5e6ab489730038f7e3495550623179fe (patch)
tree       2a0dc0b8c5bddaa0aa9bf0c6a1f5748c3a0c1d00
parent     2fc6e3d55f6080da3a43243f595bd1266595127d (diff)
Update {skylake,icelake,alderlake}_cost to add a bit of preference to vector stores.
Since the integer vector construction cost has changed, we need to
adjust the load and store costs for Intel processors.

With the patch applied, 538.imagick_r gets a ~6% improvement on ADL for
multicopy, and 525.x264_r gets a ~2% improvement on ADL and ICX for
multicopy, with no measurable changes for other benchmarks.

gcc/ChangeLog:

	PR target/105493
	* config/i386/x86-tune-costs.h (skylake_cost): Raise the gpr
	load cost from 4 to 6 and gpr store cost from 6 to 8.  Change
	SSE loads and unaligned loads cost from {6, 6, 6, 10, 20} to
	{8, 8, 8, 8, 16}.
	(icelake_cost): Ditto.
	(alderlake_cost): Raise the gpr store cost from 6 to 8 and the
	SSE loads, stores, unaligned loads and unaligned stores cost
	from {6, 6, 6, 10, 15} to {8, 8, 8, 10, 15}.

gcc/testsuite/ChangeLog:

	PR target/105493
	* gcc.target/i386/pr91446.c: Adjust to expect vectorization.
	* gcc.target/i386/pr99881.c: XFAIL.
	* gcc.target/i386/pr105493.c: New test.
	* g++.target/i386/pr105638.C: Use other sequence checks
	instead of vpxor, because code generation changed.
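For illustration, a minimal sketch of the kind of code this retuning
affects (hypothetical, not part of the patch; it is modeled on the
pr91446.c test adjusted below, and the struct and function names are
invented):

/* With the old skylake costs the scalar GPR stores looked cheaper,
   so this aggregate was built with integer stores.  With the new
   costs, -O2 -march=skylake is expected to build the value in XMM
   registers and emit two 16-byte vmovdqa vector stores, as the
   updated scan in pr91446.c below checks.  */
typedef struct { unsigned long long w, h, x, y; } rect;

void bar (rect *);

void
foo (unsigned long long width, unsigned long long height)
{
  rect t = { width, height, 0, 0 };  /* candidate for vector stores */
  bar (&t);
}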
 gcc/config/i386/x86-tune-costs.h         | 26
 gcc/testsuite/g++.target/i386/pr105638.C |  3
 gcc/testsuite/gcc.target/i386/pr105493.c | 51
 gcc/testsuite/gcc.target/i386/pr91446.c  |  2
 gcc/testsuite/gcc.target/i386/pr99881.c  |  2
 5 files changed, 68 insertions(+), 16 deletions(-)
diff --git a/gcc/config/i386/x86-tune-costs.h b/gcc/config/i386/x86-tune-costs.h
index ea34a93..6c9066c 100644
--- a/gcc/config/i386/x86-tune-costs.h
+++ b/gcc/config/i386/x86-tune-costs.h
@@ -1897,15 +1897,15 @@ struct processor_costs skylake_cost = {
8, /* "large" insn */
17, /* MOVE_RATIO */
17, /* CLEAR_RATIO */
- {4, 4, 4}, /* cost of loading integer registers
+ {6, 6, 6}, /* cost of loading integer registers
in QImode, HImode and SImode.
Relative to reg-reg move (2). */
- {6, 6, 6}, /* cost of storing integer registers */
- {6, 6, 6, 10, 20}, /* cost of loading SSE register
+ {8, 8, 8}, /* cost of storing integer registers */
+ {8, 8, 8, 8, 16}, /* cost of loading SSE register
in 32bit, 64bit, 128bit, 256bit and 512bit */
{8, 8, 8, 8, 16}, /* cost of storing SSE register
in 32bit, 64bit, 128bit, 256bit and 512bit */
- {6, 6, 6, 10, 20}, /* cost of unaligned loads. */
+ {8, 8, 8, 8, 16}, /* cost of unaligned loads. */
{8, 8, 8, 8, 16}, /* cost of unaligned stores. */
2, 2, 4, /* cost of moving XMM,YMM,ZMM register */
6, /* cost of moving SSE register to integer. */
@@ -2023,15 +2023,15 @@ struct processor_costs icelake_cost = {
8, /* "large" insn */
17, /* MOVE_RATIO */
17, /* CLEAR_RATIO */
- {4, 4, 4}, /* cost of loading integer registers
+ {6, 6, 6}, /* cost of loading integer registers
in QImode, HImode and SImode.
Relative to reg-reg move (2). */
- {6, 6, 6}, /* cost of storing integer registers */
- {6, 6, 6, 10, 20}, /* cost of loading SSE register
+ {8, 8, 8}, /* cost of storing integer registers */
+ {8, 8, 8, 8, 16}, /* cost of loading SSE register
in 32bit, 64bit, 128bit, 256bit and 512bit */
{8, 8, 8, 8, 16}, /* cost of storing SSE register
in 32bit, 64bit, 128bit, 256bit and 512bit */
- {6, 6, 6, 10, 20}, /* cost of unaligned loads. */
+ {8, 8, 8, 8, 16}, /* cost of unaligned loads. */
{8, 8, 8, 8, 16}, /* cost of unaligned stores. */
2, 2, 4, /* cost of moving XMM,YMM,ZMM register */
6, /* cost of moving SSE register to integer. */
@@ -2146,13 +2146,13 @@ struct processor_costs alderlake_cost = {
{6, 6, 6}, /* cost of loading integer registers
in QImode, HImode and SImode.
Relative to reg-reg move (2). */
- {6, 6, 6}, /* cost of storing integer registers */
- {6, 6, 6, 10, 15}, /* cost of loading SSE register
+ {8, 8, 8}, /* cost of storing integer registers */
+ {8, 8, 8, 10, 15}, /* cost of loading SSE register
in 32bit, 64bit, 128bit, 256bit and 512bit */
- {6, 6, 6, 10, 15}, /* cost of storing SSE register
+ {8, 8, 8, 10, 15}, /* cost of storing SSE register
in 32bit, 64bit, 128bit, 256bit and 512bit */
- {6, 6, 6, 10, 15}, /* cost of unaligned loads. */
- {6, 6, 6, 10, 15}, /* cost of unaligned storess. */
+ {8, 8, 8, 10, 15}, /* cost of unaligned loads. */
+ {8, 8, 8, 10, 15}, /* cost of unaligned stores. */
2, 3, 4, /* cost of moving XMM,YMM,ZMM register */
6, /* cost of moving SSE register to integer. */
18, 6, /* Gather load static, per_elt. */
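
To make the retuning above concrete, here is a back-of-the-envelope
comparison (a sketch; the enum names are invented and merely restate
the skylake_cost entries above, all relative to a reg-reg move cost
of 2):

/* Hypothetical sketch, not GCC API: compare loading 16 bytes as four
   32-bit scalar loads against one 128-bit SSE load, before and after
   the patch.  */
enum {
  OLD_GPR_LOAD     = 4,  NEW_GPR_LOAD     = 6,  /* per SImode load  */
  OLD_SSE_LOAD_128 = 6,  NEW_SSE_LOAD_128 = 8   /* one 128-bit load */
};

int
main (void)
{
  int old_scalar = 4 * OLD_GPR_LOAD;   /* 16, vs. vector cost 6 */
  int new_scalar = 4 * NEW_GPR_LOAD;   /* 24, vs. vector cost 8 */

  /* The scalar-vs-vector gap widens from 16-6=10 to 24-8=16, so the
     cost model now leans further toward the vector form -- the bit
     of preference the commit title describes.  */
  return !(new_scalar - NEW_SSE_LOAD_128
           > old_scalar - OLD_SSE_LOAD_128);
}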
diff --git a/gcc/testsuite/g++.target/i386/pr105638.C b/gcc/testsuite/g++.target/i386/pr105638.C
index ff40a45..c877093 100644
--- a/gcc/testsuite/g++.target/i386/pr105638.C
+++ b/gcc/testsuite/g++.target/i386/pr105638.C
@@ -1,6 +1,7 @@
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-std=gnu++20 -O2 -march=skylake" } */
-/* { dg-final { scan-assembler-not "vpxor" } } */
+/* { dg-final { scan-assembler-not "call\[\\t \]_?memset\[\r\n\]\[^\r\n\]movq\[\\t \]%\[a-z0-9]*, %\[a-z0-9]*\[\r\n\]\[^\r\n\]vpxor\[\\t \]%xmm0, %xmm0, %xmm0\[\r\n\]\[^\r\n\]vmovdqu\[\\t \]%xmm0, 36\\(%rax\\)" } } */
+
#include <stdint.h>
#include <vector>
diff --git a/gcc/testsuite/gcc.target/i386/pr105493.c b/gcc/testsuite/gcc.target/i386/pr105493.c
new file mode 100644
index 0000000..c6fd167
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr105493.c
@@ -0,0 +1,51 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-Ofast -march=icelake-server -fdump-tree-slp-details" } */
+
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+typedef unsigned short uint16_t;
+
+static inline
+uint32_t abs2 ( uint32_t a )
+{
+ uint32_t s = ((a>>15)&0x10001)*0xffff;
+ return (a+s)^s;
+}
+
+#define HADAMARD4(d0, d1, d2, d3, s0, s1, s2, s3) {\
+ int t0 = s0 + s1;\
+ int t1 = s0 - s1;\
+ int t2 = s2 + s3;\
+ int t3 = s2 - s3;\
+ d0 = t0 + t2;\
+ d2 = t0 - t2;\
+ d1 = t1 + t3;\
+ d3 = t1 - t3;\
+}
+
+int
+foo ( uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
+{
+ uint32_t tmp[4][4];
+ uint32_t a0, a1, a2, a3;
+ int sum = 0;
+ for( int i = 0; i < 4; i++, pix1 += i_pix1, pix2 += i_pix2 )
+ {
+ a0 = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
+ a1 = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
+ a2 = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
+ a3 = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
+ HADAMARD4( tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], a0,a1,a2,a3 );
+ }
+ for( int i = 0; i < 4; i++ )
+ {
+ HADAMARD4( a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i] );
+ sum += abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
+ }
+ return (((uint16_t)sum) + ((uint32_t)sum>>16)) >> 1;
+}
+
+
+/* The first loop should be vectorized, which will eliminate redundant stores
+ and loads. */
+/* { dg-final { scan-tree-dump-times " MEM <vector\\\(4\\\) unsigned int> \\\[\[\^\]\]\*\\\] = " 4 "slp1" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr91446.c b/gcc/testsuite/gcc.target/i386/pr91446.c
index 067bf43..0243ca3 100644
--- a/gcc/testsuite/gcc.target/i386/pr91446.c
+++ b/gcc/testsuite/gcc.target/i386/pr91446.c
@@ -21,4 +21,4 @@ foo (unsigned long long width, unsigned long long height,
bar (&t);
}
-/* { dg-final { scan-assembler-times "xmm\[0-9\]" 0 } } */
+/* { dg-final { scan-assembler-times "vmovdqa\[^\n\r\]*xmm\[0-9\]" 2 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr99881.c b/gcc/testsuite/gcc.target/i386/pr99881.c
index a1ec1d1b..3e087eb 100644
--- a/gcc/testsuite/gcc.target/i386/pr99881.c
+++ b/gcc/testsuite/gcc.target/i386/pr99881.c
@@ -1,7 +1,7 @@
/* PR target/99881. */
/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-Ofast -march=skylake" } */
-/* { dg-final { scan-assembler-not "xmm\[0-9\]" } } */
+/* { dg-final { scan-assembler-not "xmm\[0-9\]" { xfail *-*-* } } } */
void
foo (int* __restrict a, int n, int c)