aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAdhemerval Zanella <adhemerval.zanella@linaro.org>2016-12-15 18:17:09 -0200
committerAdhemerval Zanella <adhemerval.zanella@linaro.org>2016-12-27 10:50:41 -0200
commit3daef2c8ee4df29b9806e3bb2f407417c1222e9a (patch)
treeb752089e0a3a443da08b6161e1ef6c626292e854
parentcecbc7967f0bcac718b6f8f8942b58403c0e917c (diff)
downloadglibc-3daef2c8ee4df29b9806e3bb2f407417c1222e9a.zip
glibc-3daef2c8ee4df29b9806e3bb2f407417c1222e9a.tar.gz
glibc-3daef2c8ee4df29b9806e3bb2f407417c1222e9a.tar.bz2
Fix x86_64 memchr for large input sizes
The current optimized memchr for x86_64 performs, for input argument pointers that are modulo 64 in the range [49,63], a pointer addition which might overflow when there is no search char in the rest of the 64-byte block: * sysdeps/x86_64/memchr.S 77 .p2align 4 78 L(unaligned_no_match): 79 add %rcx, %rdx Add (uintptr_t)s % 16 to n in %rdx. 80 sub $16, %rdx 81 jbe L(return_null) This patch fixes the issue by adding saturated math that sets a maximum pointer value (UINTPTR_MAX) if the addition overflows. Checked on x86_64-linux-gnu and powerpc64-linux-gnu. [BZ# 19387] * sysdeps/x86_64/memchr.S (memchr): Avoid overflow in pointer addition. * string/test-memchr.c (do_test): Remove alignment limitation. (test_main): Add tests that trigger BZ# 19387.
-rw-r--r--ChangeLog8
-rw-r--r--string/test-memchr.c9
-rw-r--r--sysdeps/x86_64/memchr.S6
3 files changed, 18 insertions, 5 deletions
diff --git a/ChangeLog b/ChangeLog
index f090910..297205c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2016-12-27 Adhemerval Zanella <adhemerval.zanella@linaro.org>
+
+ [BZ# 19387]
+ * sysdeps/x86_64/memchr.S (memchr): Avoid overflow in pointer
+ addition.
+ * string/test-memchr.c (do_test): Remove alignment limitation.
+ (test_main): Add test that trigger BZ# 19387.
+
2016-12-26 Nick Alcock <nick.alcock@oracle.com>
[BZ #7065]
diff --git a/string/test-memchr.c b/string/test-memchr.c
index 0690cb4..10d696a 100644
--- a/string/test-memchr.c
+++ b/string/test-memchr.c
@@ -76,7 +76,6 @@ do_test (size_t align, size_t pos, size_t len, size_t n, int seek_char)
size_t i;
CHAR *result;
- align &= 7;
if ((align + len) * sizeof (CHAR) >= page_size)
return;
@@ -194,12 +193,12 @@ test_main (void)
do_test (i, 64, 256, SIZE_MAX, 0);
}
- for (i = 1; i < 16; ++i)
+ for (i = 1; i < 64; ++i)
{
- for (j = 1; j < 16; j++)
+ for (j = 1; j < 64; j++)
{
- do_test (0, 16 - j, 16, SIZE_MAX, 23);
- do_test (i, 16 - j, 16, SIZE_MAX, 23);
+ do_test (0, 64 - j, 64, SIZE_MAX, 23);
+ do_test (i, 64 - j, 64, SIZE_MAX, 23);
}
}
diff --git a/sysdeps/x86_64/memchr.S b/sysdeps/x86_64/memchr.S
index 132eacb..1e34568 100644
--- a/sysdeps/x86_64/memchr.S
+++ b/sysdeps/x86_64/memchr.S
@@ -76,7 +76,13 @@ L(crosscache):
.p2align 4
L(unaligned_no_match):
+ /* Calculate the last acceptable address and check for possible
+ addition overflow by using saturated math:
+ rdx = rcx + rdx
+ rdx |= -(rdx < rcx) */
add %rcx, %rdx
+ sbb %rax, %rax
+ or %rax, %rdx
sub $16, %rdx
jbe L(return_null)
add $16, %rdi