author     H.J. Lu <hjl.tools@gmail.com>  2017-04-18 14:01:45 -0700
committer  H.J. Lu <hjl.tools@gmail.com>  2017-04-18 14:01:45 -0700
commit     4cb334c4d6249686653137ec273d081371b3672d
tree       0ddce19851595aeccad1caf222c91014f7817de2 /sysdeps
parent     1c53cb49de6d82d9469ccbd5aa0c55924502bd8b
x86: Use AVX2 memcpy/memset on Skylake server [BZ #21396]
On Skylake server, AVX512 load/store instructions in memcpy/memset may
lower the CPU turbo frequency in certain situations.  Using AVX2 in
memcpy/memset instead has been observed to improve overall performance
in many workloads because of the higher frequency.
Since AVX512ER is unique to Xeon Phi, this patch sets Prefer_No_AVX512
if AVX512ER isn't available so that AVX2 versions of memcpy/memset are
used on Skylake server.
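To make the heuristic concrete, here is a minimal C sketch (illustration only: the struct and function names are invented, and the ERMS/Prefer_ERMS check of the real IFUNC resolvers is omitted) of the selection logic this patch produces.  AVX512ER is treated as the marker for Xeon Phi, so when it is absent the AVX512 variants are skipped even on AVX512F-capable CPUs such as Skylake server.

#include <stdio.h>

/* Hypothetical names, not glibc's internal API.  */
struct cpu_flags
{
  int has_avx2;
  int has_avx512f;
  int has_avx512er;
};

const char *
pick_memcpy (const struct cpu_flags *cpu)
{
  /* This patch: Prefer_No_AVX512 is set whenever AVX512ER is missing.  */
  int prefer_no_avx512 = !cpu->has_avx512er;

  if (cpu->has_avx512f && !prefer_no_avx512)
    return "__memcpy_avx512_no_vzeroupper";   /* Xeon Phi keeps AVX512.  */
  if (cpu->has_avx2)
    return "__memcpy_avx2_unaligned";         /* Skylake server lands here.  */
  return "__memcpy_sse2_unaligned";           /* Generic fallback.  */
}

int
main (void)
{
  struct cpu_flags skylake_server = { 1, 1, 0 };  /* AVX512F, no AVX512ER.  */
  struct cpu_flags xeon_phi = { 1, 1, 1 };
  printf ("Skylake server -> %s\n", pick_memcpy (&skylake_server));
  printf ("Xeon Phi       -> %s\n", pick_memcpy (&xeon_phi));
  return 0;
}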
[BZ #21396]
* sysdeps/x86/cpu-features.c (init_cpu_features): Set
Prefer_No_AVX512 if AVX512ER isn't available.
* sysdeps/x86/cpu-features.h (bit_arch_Prefer_No_AVX512): New.
(index_arch_Prefer_No_AVX512): Likewise.
* sysdeps/x86_64/multiarch/memcpy.S (__new_memcpy): Don't use
AVX512 version if Prefer_No_AVX512 is set.
* sysdeps/x86_64/multiarch/memcpy_chk.S (__memcpy_chk):
Likewise.
* sysdeps/x86_64/multiarch/memmove.S (__libc_memmove): Likewise.
* sysdeps/x86_64/multiarch/memmove_chk.S (__memmove_chk):
Likewise.
* sysdeps/x86_64/multiarch/mempcpy.S (__mempcpy): Likewise.
* sysdeps/x86_64/multiarch/mempcpy_chk.S (__mempcpy_chk):
Likewise.
* sysdeps/x86_64/multiarch/memset.S (memset): Likewise.
* sysdeps/x86_64/multiarch/memset_chk.S (__memset_chk):
Likewise.
Diffstat (limited to 'sysdeps')
 sysdeps/x86/cpu-features.c             | 6
 sysdeps/x86/cpu-features.h             | 3
 sysdeps/x86_64/multiarch/memcpy.S      | 2
 sysdeps/x86_64/multiarch/memcpy_chk.S  | 2
 sysdeps/x86_64/multiarch/memmove.S     | 2
 sysdeps/x86_64/multiarch/memmove_chk.S | 2
 sysdeps/x86_64/multiarch/mempcpy.S     | 2
 sysdeps/x86_64/multiarch/mempcpy_chk.S | 2
 sysdeps/x86_64/multiarch/memset.S      | 2
 sysdeps/x86_64/multiarch/memset_chk.S  | 2
10 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/sysdeps/x86/cpu-features.c b/sysdeps/x86/cpu-features.c
index ae7f844..f30918d 100644
--- a/sysdeps/x86/cpu-features.c
+++ b/sysdeps/x86/cpu-features.c
@@ -224,10 +224,14 @@ init_cpu_features (struct cpu_features *cpu_features)
 	  |= bit_arch_AVX_Fast_Unaligned_Load;
 
       /* Since AVX512ER is unique to Xeon Phi, set Prefer_No_VZEROUPPER
-	 if AVX512ER is available.  */
+	 if AVX512ER is available.  Don't use AVX512 to avoid lower CPU
+	 frequency if AVX512ER isn't available.  */
       if (CPU_FEATURES_CPU_P (cpu_features, AVX512ER))
 	cpu_features->feature[index_arch_Prefer_No_VZEROUPPER]
 	  |= bit_arch_Prefer_No_VZEROUPPER;
+      else
+	cpu_features->feature[index_arch_Prefer_No_AVX512]
+	  |= bit_arch_Prefer_No_AVX512;
 
       /* To avoid SSE transition penalty, use _dl_runtime_resolve_slow.
         If XGETBV suports ECX == 1, use _dl_runtime_resolve_opt.  */
diff --git a/sysdeps/x86/cpu-features.h b/sysdeps/x86/cpu-features.h
index 1583d65..85a39e7 100644
--- a/sysdeps/x86/cpu-features.h
+++ b/sysdeps/x86/cpu-features.h
@@ -39,6 +39,7 @@
 #define bit_arch_Prefer_ERMS			(1 << 19)
 #define bit_arch_Use_dl_runtime_resolve_opt	(1 << 20)
 #define bit_arch_Use_dl_runtime_resolve_slow	(1 << 21)
+#define bit_arch_Prefer_No_AVX512		(1 << 22)
 
 /* CPUID Feature flags.  */
 
@@ -118,6 +119,7 @@
 # define index_arch_Prefer_ERMS			FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Use_dl_runtime_resolve_opt	FEATURE_INDEX_1*FEATURE_SIZE
 # define index_arch_Use_dl_runtime_resolve_slow	FEATURE_INDEX_1*FEATURE_SIZE
+# define index_arch_Prefer_No_AVX512		FEATURE_INDEX_1*FEATURE_SIZE
 
 # if defined (_LIBC) && !IS_IN (nonlib)
 
@@ -302,6 +304,7 @@ extern const struct cpu_features *__get_cpu_features (void)
 # define index_arch_Prefer_ERMS			FEATURE_INDEX_1
 # define index_arch_Use_dl_runtime_resolve_opt	FEATURE_INDEX_1
 # define index_arch_Use_dl_runtime_resolve_slow	FEATURE_INDEX_1
+# define index_arch_Prefer_No_AVX512		FEATURE_INDEX_1
 
 #endif	/* !__ASSEMBLER__ */
diff --git a/sysdeps/x86_64/multiarch/memcpy.S b/sysdeps/x86_64/multiarch/memcpy.S
index 1f83ee3..af27703 100644
--- a/sysdeps/x86_64/multiarch/memcpy.S
+++ b/sysdeps/x86_64/multiarch/memcpy.S
@@ -32,6 +32,8 @@ ENTRY(__new_memcpy)
 	lea	__memcpy_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memcpy_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memcpy_chk.S b/sysdeps/x86_64/multiarch/memcpy_chk.S
index 5492342..8737fb9 100644
--- a/sysdeps/x86_64/multiarch/memcpy_chk.S
+++ b/sysdeps/x86_64/multiarch/memcpy_chk.S
@@ -30,6 +30,8 @@ ENTRY(__memcpy_chk)
 	.type	__memcpy_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memcpy_chk_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memmove.S b/sysdeps/x86_64/multiarch/memmove.S
index 2021bfc..8c534e8 100644
--- a/sysdeps/x86_64/multiarch/memmove.S
+++ b/sysdeps/x86_64/multiarch/memmove.S
@@ -30,6 +30,8 @@ ENTRY(__libc_memmove)
 	lea	__memmove_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memmove_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memmove_chk.S b/sysdeps/x86_64/multiarch/memmove_chk.S
index 8a252ad..7870dd0 100644
--- a/sysdeps/x86_64/multiarch/memmove_chk.S
+++ b/sysdeps/x86_64/multiarch/memmove_chk.S
@@ -29,6 +29,8 @@ ENTRY(__memmove_chk)
 	.type	__memmove_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__memmove_chk_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/mempcpy.S b/sysdeps/x86_64/multiarch/mempcpy.S
index 79c840d..b8b2b28 100644
--- a/sysdeps/x86_64/multiarch/mempcpy.S
+++ b/sysdeps/x86_64/multiarch/mempcpy.S
@@ -32,6 +32,8 @@ ENTRY(__mempcpy)
 	lea	__mempcpy_erms(%rip), %RAX_LP
 	HAS_ARCH_FEATURE (Prefer_ERMS)
 	jnz	2f
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__mempcpy_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/mempcpy_chk.S b/sysdeps/x86_64/multiarch/mempcpy_chk.S
index 6927962..072b22c 100644
--- a/sysdeps/x86_64/multiarch/mempcpy_chk.S
+++ b/sysdeps/x86_64/multiarch/mempcpy_chk.S
@@ -30,6 +30,8 @@ ENTRY(__mempcpy_chk)
 	.type	__mempcpy_chk, @gnu_indirect_function
 	LOAD_RTLD_GLOBAL_RO_RDX
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	1f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	1f
 	lea	__mempcpy_chk_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memset.S b/sysdeps/x86_64/multiarch/memset.S
index c958b2f..9d33118 100644
--- a/sysdeps/x86_64/multiarch/memset.S
+++ b/sysdeps/x86_64/multiarch/memset.S
@@ -41,6 +41,8 @@ ENTRY(memset)
 	jnz	L(AVX512F)
 	lea	__memset_avx2_unaligned(%rip), %RAX_LP
 L(AVX512F):
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	2f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	2f
 	lea	__memset_avx512_no_vzeroupper(%rip), %RAX_LP
diff --git a/sysdeps/x86_64/multiarch/memset_chk.S b/sysdeps/x86_64/multiarch/memset_chk.S
index 79eaa37..7e08311 100644
--- a/sysdeps/x86_64/multiarch/memset_chk.S
+++ b/sysdeps/x86_64/multiarch/memset_chk.S
@@ -38,6 +38,8 @@ ENTRY(__memset_chk)
 	jnz	L(AVX512F)
 	lea	__memset_chk_avx2_unaligned(%rip), %RAX_LP
 L(AVX512F):
+	HAS_ARCH_FEATURE (Prefer_No_AVX512)
+	jnz	2f
 	HAS_ARCH_FEATURE (AVX512F_Usable)
 	jz	2f
 	lea	__memset_chk_avx512_no_vzeroupper(%rip), %RAX_LP
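As a quick way to see which side of the new Prefer_No_AVX512 heuristic a given machine falls on, the following standalone program (not part of this commit; it assumes GCC's __builtin_cpu_supports with the "avx512f" and "avx512er" keywords) reports the two relevant CPUID features:

#include <stdio.h>

int
main (void)
{
  /* On Skylake server AVX512F is set and AVX512ER is not, so this
     commit makes glibc fall back to the AVX2 memcpy/memset variants.  */
  int avx512f = __builtin_cpu_supports ("avx512f");
  int avx512er = __builtin_cpu_supports ("avx512er");

  printf ("AVX512F:  %s\n", avx512f ? "yes" : "no");
  printf ("AVX512ER: %s\n", avx512er ? "yes" : "no");

  if (avx512f && !avx512er)
    puts ("AVX512F without AVX512ER (Skylake server case): "
	  "AVX2 memcpy/memset preferred.");
  else if (avx512f && avx512er)
    puts ("AVX512F with AVX512ER (Xeon Phi case): "
	  "AVX512 memcpy/memset remains eligible.");
  else
    puts ("No AVX512F: the AVX512 variants were never candidates.");
  return 0;
}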