author | Adhemerval Zanella <adhemerval.zanella@linaro.org> | 2017-01-03 12:19:12 -0200
---|---|---
committer | Adhemerval Zanella <adhemerval.zanella@linaro.org> | 2017-01-03 14:24:53 -0200
commit | 8dad72997af2be0dc72a4bc7dbe82d85c90334fc |
tree | dc5d83dfccb1e2fba0354ecc9f13a538d47fbb42 |
parent | d4d629e6187e33050902824a94498b6096eacac9 |
Fix x86 strncat optimized implementation for large sizes
Similar to BZ #19387, BZ #21014, and BZ #20971, both x86 SSE2 optimized
strncat assembly implementations fail to handle size overflow
correctly.
On x86_64 the defect is actually in strcpy-sse2-unaligned, but it is
also triggered through the optimized strncat implementation.
This patch applies the same strategy as commit 3daef2c8ee4df2:
saturated arithmetic is used for the overflow case.
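As a minimal sketch (the helper name is hypothetical, not part of the
patch), the saturating addition the assembly performs is equivalent to
the following C:

```c
#include <stdint.h>

/* Compute a + b, but clamp to the all-ones value on unsigned overflow
   instead of wrapping.  This mirrors the "add; sbb; or" sequence the
   patch adds: "add" sets the carry flag when the sum wraps, "sbb" of a
   register with itself turns that carry into 0 or all-ones, and "or"
   then forces the saturated result.  */
static inline uintptr_t
saturating_add (uintptr_t a, uintptr_t b)
{
  uintptr_t sum = a + b;
  uintptr_t mask = -(uintptr_t) (sum < a);	/* all-ones iff wrapped */
  return sum | mask;
}
```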
Checked on x86_64-linux-gnu and i686-linux-gnu. It fixes BZ #19390.
[BZ #19390]
* string/test-strncat.c (test_main): Add tests with SIZE_MAX as
maximum string size.
* sysdeps/i386/i686/multiarch/strcat-sse2.S (STRCAT): Avoid overflow
in pointer addition.
* sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S (STRCPY):
Likewise.
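The new tests boil down to calls of the following shape (an
illustrative sketch; the real harness in string/test-strncat.c drives
do_test across many alignments and lengths):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char dst[32] = "abcd";

  /* With a maximum size of SIZE_MAX, strncat must behave like strcat,
     since the source string terminates long before the limit.  The
     broken implementations added the size to a pointer or offset, and
     the wrapped result made the remaining-length computation bogus.  */
  strncat (dst, "efgh", SIZE_MAX);
  puts (dst);	/* expected output: abcdefgh */
  return 0;
}
```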
-rw-r--r-- | ChangeLog | 10
-rw-r--r-- | string/test-strncat.c | 15
-rw-r--r-- | sysdeps/i386/i686/multiarch/strcat-sse2.S | 2
-rw-r--r-- | sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S | 2

4 files changed, 29 insertions, 0 deletions
diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,13 @@
+2017-01-03  Adhemerval Zanella  <adhemerval.zanella@linaro.org>
+
+	[BZ #19390]
+	* string/test-strncat.c (test_main): Add tests with SIZE_MAX as
+	maximum string size.
+	* sysdeps/i386/i686/multiarch/strcat-sse2.S (STRCAT): Avoid overflow
+	in pointer addition.
+	* sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S (STRCPY):
+	Likewise.
+
 2017-01-03  Joseph Myers  <joseph@codesourcery.com>
 
 	* elf/Makefile ($(objpfx)tst-ldconfig-X.out): Correct arguments
diff --git a/string/test-strncat.c b/string/test-strncat.c
index 55d6cc5..26a6b5b 100644
--- a/string/test-strncat.c
+++ b/string/test-strncat.c
@@ -284,12 +284,23 @@ test_main (void)
       do_test (0, 0, 8, 8, n, SMALL_CHAR);
       do_test (0, 8, 8, 8, n, SMALL_CHAR);
 
+      do_test (0, 2, 2, 2, SIZE_MAX, SMALL_CHAR);
+      do_test (0, 0, 4, 4, SIZE_MAX, SMALL_CHAR);
+      do_test (4, 0, 4, 4, SIZE_MAX, BIG_CHAR);
+      do_test (0, 0, 8, 8, SIZE_MAX, SMALL_CHAR);
+      do_test (0, 8, 8, 8, SIZE_MAX, SMALL_CHAR);
+
       for (i = 1; i < 8; ++i)
 	{
 	  do_test (0, 0, 8 << i, 8 << i, n, SMALL_CHAR);
 	  do_test (8 - i, 2 * i, 8 << i, 8 << i, n, SMALL_CHAR);
 	  do_test (0, 0, 8 << i, 2 << i, n, SMALL_CHAR);
 	  do_test (8 - i, 2 * i, 8 << i, 2 << i, n, SMALL_CHAR);
+
+	  do_test (0, 0, 8 << i, 8 << i, SIZE_MAX, SMALL_CHAR);
+	  do_test (8 - i, 2 * i, 8 << i, 8 << i, SIZE_MAX, SMALL_CHAR);
+	  do_test (0, 0, 8 << i, 2 << i, SIZE_MAX, SMALL_CHAR);
+	  do_test (8 - i, 2 * i, 8 << i, 2 << i, SIZE_MAX, SMALL_CHAR);
 	}
 
       for (i = 1; i < 8; ++i)
@@ -297,6 +308,10 @@ test_main (void)
 	  do_test (i, 2 * i, 8 << i, 1, n, SMALL_CHAR);
 	  do_test (2 * i, i, 8 << i, 1, n, BIG_CHAR);
 	  do_test (i, i, 8 << i, 10, n, SMALL_CHAR);
+
+	  do_test (i, 2 * i, 8 << i, 1, SIZE_MAX, SMALL_CHAR);
+	  do_test (2 * i, i, 8 << i, 1, SIZE_MAX, BIG_CHAR);
+	  do_test (i, i, 8 << i, 10, SIZE_MAX, SMALL_CHAR);
 	}
     }
diff --git a/sysdeps/i386/i686/multiarch/strcat-sse2.S b/sysdeps/i386/i686/multiarch/strcat-sse2.S
index 145ae66..6359c73 100644
--- a/sysdeps/i386/i686/multiarch/strcat-sse2.S
+++ b/sysdeps/i386/i686/multiarch/strcat-sse2.S
@@ -227,6 +227,8 @@ L(StartStrcpyPart):
 	pxor	%xmm0, %xmm0
 # ifdef USE_AS_STRNCAT
 	add	%ecx, %ebx
+	sbb	%edx, %edx
+	or	%edx, %ebx
 # endif
 	sub	%ecx, %eax
 	jmp	L(Unalign16Both)
diff --git a/sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S b/sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S
index c038043..6a5ab7a 100644
--- a/sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S
+++ b/sysdeps/x86_64/multiarch/strcpy-sse2-unaligned.S
@@ -99,6 +99,8 @@ L(Unalign16Both):
 	sub	%rcx, %rdi
 # ifdef USE_AS_STRNCPY
 	add	%rcx, %r8
+	sbb	%rcx, %rcx
+	or	%rcx, %r8
 # endif
 	mov	$16, %rcx
 	movdqa	(%rsi, %rcx), %xmm1
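Both assembly hunks use the same idiom: the preceding add leaves the
carry flag set exactly when the unsigned sum wraps, sbb of a register
with itself materializes that carry as 0 or all-ones, and the final or
leaves the length register untouched in the normal case while
saturating it to the maximum representable value after an overflow, so
the subsequent bounds checks operate on a sane limit.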