author     H.J. Lu <hjl.tools@gmail.com>   2016-06-08 13:55:45 -0700
committer  H.J. Lu <hjl.tools@gmail.com>   2016-06-08 13:56:14 -0700
commit     5e8c5bb1ac83aa2577d64d82467a653fa413f7ce (patch)
tree       c713874b92fbc3f6858e48d71a883a990665a2f3 /sysdeps/x86_64/memset.S
parent     5188b973250523d3e9c80ea3ab4001f696e6fa1a (diff)
X86-64: Remove the previous SSE2/AVX2 memsets
Since the new SSE2/AVX2 memsets are faster than the previous ones, we
can remove the previous SSE2/AVX2 memsets and replace them with the
new ones. This reduces the size of libc.so by about 900 bytes.
There is no change in IFUNC selection if the SSE2 and AVX2 memsets
were not used before. If the SSE2 or AVX2 memset was used, the new
SSE2 or AVX2 memset optimized with Enhanced REP STOSB will be used on
processors with ERMS. The new AVX512 memset will be used on
processors with AVX512 that prefer vzeroupper.
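To make the selection rule above concrete, here is a minimal,
illustrative C sketch of the feature test behind it. glibc's real
selector is written in assembly in sysdeps/x86_64/multiarch/memset.S
and reads precomputed glibc-internal feature bits; this standalone
program probes CPUID directly and merely prints which variant would be
chosen (it also omits the XGETBV/OS-support check that real AVX2
dispatch requires):

/* Sketch only: prints the memset variant the dispatch rule above
   would select on this CPU.  Not glibc code.  */
#include <cpuid.h>
#include <stdio.h>

int
main (void)
{
  unsigned int eax, ebx, ecx, edx;
  int avx2 = 0, erms = 0;

  /* CPUID.(EAX=7,ECX=0):EBX bit 5 = AVX2, bit 9 = ERMS.  */
  if (__get_cpuid_count (7, 0, &eax, &ebx, &ecx, &edx))
    {
      avx2 = (ebx >> 5) & 1;
      erms = (ebx >> 9) & 1;
    }

  if (avx2)
    puts (erms ? "__memset_avx2_unaligned_erms" : "__memset_avx2_unaligned");
  else
    puts (erms ? "__memset_sse2_unaligned_erms" : "__memset_sse2_unaligned");
  return 0;
}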
[BZ #19881]
* sysdeps/x86_64/multiarch/memset-sse2-unaligned-erms.S: Folded
into ...
* sysdeps/x86_64/memset.S: This.
(__bzero): Removed.
(__memset_tail): Likewise.
(__memset_chk): Likewise.
(memset): Likewise.
(MEMSET_CHK_SYMBOL): New.  Define only if MEMSET_SYMBOL isn't
defined.
(MEMSET_SYMBOL): Define only if MEMSET_SYMBOL isn't defined.
(__memset_zero_constant_len_parameter): Check SHARED instead of
PIC.
* sysdeps/x86_64/multiarch/memset-avx2.S: Removed.
* sysdeps/x86_64/multiarch/Makefile (sysdep_routines): Remove
memset-avx2 and memset-sse2-unaligned-erms.
* sysdeps/x86_64/multiarch/ifunc-impl-list.c
(__libc_ifunc_impl_list): Remove __memset_chk_sse2,
__memset_chk_avx2, __memset_sse2 and __memset_avx2_unaligned.
* sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
(__bzero): Enabled.
* sysdeps/x86_64/multiarch/memset.S (memset): Replace
__memset_sse2 and __memset_avx2 with __memset_sse2_unaligned
and __memset_avx2_unaligned. Use __memset_sse2_unaligned_erms
or __memset_avx2_unaligned_erms if processor has ERMS. Support
__memset_avx512_unaligned_erms and __memset_avx512_unaligned.
(memset): Removed.
(__memset_chk): Likewise.
(MEMSET_SYMBOL): New.
(libc_hidden_builtin_def): Replace __memset_sse2 with
__memset_sse2_unaligned.
* sysdeps/x86_64/multiarch/memset_chk.S (__memset_chk): Replace
__memset_chk_sse2 and __memset_chk_avx2 with
__memset_chk_sse2_unaligned and __memset_chk_avx2_unaligned.
Use __memset_chk_sse2_unaligned_erms or
__memset_chk_avx2_unaligned_erms if processor has ERMS. Support
__memset_chk_avx512_unaligned_erms and
__memset_chk_avx512_unaligned.
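The ChangeLog above removes the old assembly __memset_chk entry and
routes the checked variants through the new framework. Its semantics,
sketched as a standalone C stand-in (not glibc code: chk_fail and
memset_chk here play the roles of glibc's __chk_fail hook and
__memset_chk entry point):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
chk_fail (void)                 /* stands in for glibc's __chk_fail  */
{
  fputs ("*** buffer overflow detected ***\n", stderr);
  abort ();
}

static void *
memset_chk (void *dst, int c, size_t len, size_t dstlen)
{
  /* The removed asm did: cmpq %rdx, %rcx; jb __chk_fail.  */
  if (dstlen < len)
    chk_fail ();
  return memset (dst, c, len);
}

int
main (void)
{
  char buf[8];
  memset_chk (buf, 0, sizeof buf, sizeof buf);  /* ok  */
  /* memset_chk (buf, 0, 16, sizeof buf);  would abort.  */
  return 0;
}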
Diffstat (limited to 'sysdeps/x86_64/memset.S')
-rw-r--r--  sysdeps/x86_64/memset.S | 123
1 file changed, 21 insertions(+), 102 deletions(-)
diff --git a/sysdeps/x86_64/memset.S b/sysdeps/x86_64/memset.S
index 4cf0da0..62b85c3 100644
--- a/sysdeps/x86_64/memset.S
+++ b/sysdeps/x86_64/memset.S
@@ -19,113 +19,32 @@
 #include <sysdep.h>
 
-	.text
-#if IS_IN (libc)
-ENTRY(__bzero)
-	movq	%rdi, %rax /* Set return value.  */
-	movq	%rsi, %rdx /* Set n.  */
-	pxor	%xmm0, %xmm0
-	jmp	L(entry_from_bzero)
-END(__bzero)
-weak_alias (__bzero, bzero)
-
-/* Like memset but takes additional parameter with return value.  */
-ENTRY(__memset_tail)
-	movq	%rcx, %rax /* Set return value.  */
-
-	movd	%esi, %xmm0
-	punpcklbw %xmm0, %xmm0
-	punpcklwd %xmm0, %xmm0
-	pshufd	$0, %xmm0, %xmm0
-
-	jmp	L(entry_from_bzero)
-END(__memset_tail)
-#endif
-
-#if defined PIC && IS_IN (libc)
-ENTRY_CHK (__memset_chk)
-	cmpq	%rdx, %rcx
-	jb	HIDDEN_JUMPTARGET (__chk_fail)
-END_CHK (__memset_chk)
+#define VEC_SIZE	16
+#define VEC(i)		xmm##i
+/* Don't use movups and movaps since it will get larger nop paddings for
+   alignment.  */
+#define VMOVU		movdqu
+#define VMOVA		movdqa
+
+#define VDUP_TO_VEC0_AND_SET_RETURN(d, r) \
+  movd d, %xmm0; \
+  movq r, %rax; \
+  punpcklbw %xmm0, %xmm0; \
+  punpcklwd %xmm0, %xmm0; \
+  pshufd $0, %xmm0, %xmm0
+
+#define SECTION(p)		p
+
+#ifndef MEMSET_SYMBOL
+# define MEMSET_CHK_SYMBOL(p,s)	p
+# define MEMSET_SYMBOL(p,s)	memset
 #endif
 
-ENTRY (memset)
-	movd	%esi, %xmm0
-	movq	%rdi, %rax
-	punpcklbw %xmm0, %xmm0
-	punpcklwd %xmm0, %xmm0
-	pshufd	$0, %xmm0, %xmm0
-L(entry_from_bzero):
-	cmpq	$64, %rdx
-	ja	L(loop_start)
-	cmpq	$16, %rdx
-	jbe	L(less_16_bytes)
-	cmpq	$32, %rdx
-	movdqu	%xmm0, (%rdi)
-	movdqu	%xmm0, -16(%rdi,%rdx)
-	ja	L(between_32_64_bytes)
-L(return):
-	rep
-	ret
-	.p2align 4
-L(between_32_64_bytes):
-	movdqu	%xmm0, 16(%rdi)
-	movdqu	%xmm0, -32(%rdi,%rdx)
-	ret
-	.p2align 4
-L(loop_start):
-	leaq	64(%rdi), %rcx
-	movdqu	%xmm0, (%rdi)
-	andq	$-64, %rcx
-	movdqu	%xmm0, -16(%rdi,%rdx)
-	movdqu	%xmm0, 16(%rdi)
-	movdqu	%xmm0, -32(%rdi,%rdx)
-	movdqu	%xmm0, 32(%rdi)
-	movdqu	%xmm0, -48(%rdi,%rdx)
-	movdqu	%xmm0, 48(%rdi)
-	movdqu	%xmm0, -64(%rdi,%rdx)
-	addq	%rdi, %rdx
-	andq	$-64, %rdx
-	cmpq	%rdx, %rcx
-	je	L(return)
-	.p2align 4
-L(loop):
-	movdqa	%xmm0, (%rcx)
-	movdqa	%xmm0, 16(%rcx)
-	movdqa	%xmm0, 32(%rcx)
-	movdqa	%xmm0, 48(%rcx)
-	addq	$64, %rcx
-	cmpq	%rcx, %rdx
-	jne	L(loop)
-	rep
-	ret
-L(less_16_bytes):
-	movq	%xmm0, %rcx
-	testb	$24, %dl
-	jne	L(between8_16bytes)
-	testb	$4, %dl
-	jne	L(between4_7bytes)
-	testb	$1, %dl
-	je	L(odd_byte)
-	movb	%cl, (%rdi)
-L(odd_byte):
-	testb	$2, %dl
-	je	L(return)
-	movw	%cx, -2(%rax,%rdx)
-	ret
-L(between4_7bytes):
-	movl	%ecx, (%rdi)
-	movl	%ecx, -4(%rdi,%rdx)
-	ret
-L(between8_16bytes):
-	movq	%rcx, (%rdi)
-	movq	%rcx, -8(%rdi,%rdx)
-	ret
+#include "multiarch/memset-vec-unaligned-erms.S"
 
-END (memset)
 libc_hidden_builtin_def (memset)
 
-#if defined PIC && IS_IN (libc) && !defined USE_MULTIARCH
+#if defined SHARED && IS_IN (libc) && !defined USE_MULTIARCH
 strong_alias (__memset_chk, __memset_zero_constant_len_parameter)
 .section .gnu.warning.__memset_zero_constant_len_parameter
 .string "memset used with constant zero length parameter; this could be due to transposed parameters"
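The VDUP_TO_VEC0_AND_SET_RETURN macro added above performs the classic
SSE2 byte-broadcast (there is no single broadcast instruction before
AVX2): duplicate the low byte to a word, the word to a dword, then
splat the dword across the register. The same sequence expressed with
SSE2 intrinsics, as a standalone demonstration (variable names are
illustrative, not from the source):

/* Broadcast the low byte of c into all 16 lanes of an XMM register,
   mirroring the removed memset prologue and the new macro.  */
#include <emmintrin.h>
#include <stdio.h>

int
main (void)
{
  int c = 0xAB;
  __m128i v = _mm_cvtsi32_si128 (c);  /* movd  %esi, %xmm0        */
  v = _mm_unpacklo_epi8 (v, v);       /* punpcklbw %xmm0, %xmm0   */
  v = _mm_unpacklo_epi16 (v, v);      /* punpcklwd %xmm0, %xmm0   */
  v = _mm_shuffle_epi32 (v, 0);       /* pshufd $0, %xmm0, %xmm0  */

  unsigned char out[16];
  _mm_storeu_si128 ((__m128i *) out, v);
  for (int i = 0; i < 16; i++)
    printf ("%02x ", out[i]);         /* prints "ab " 16 times    */
  printf ("\n");
  return 0;
}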