author     H.J. Lu <hjl.tools@gmail.com>    2019-01-21 11:27:25 -0800
committer  H.J. Lu <hjl.tools@gmail.com>    2019-01-21 11:27:36 -0800
commit     231c56760c1e2ded21ad96bbb860b1f08c556c7a (patch)
tree       4aca6b1947a0188731cbf37e27b307cc8603b1ef /sysdeps/x86_64/multiarch
parent     b304fc201d2f6baf52ea790df8643e99772243cd (diff)
x86-64 memcpy: Properly handle the length parameter [BZ# 24097]
On x32, the size_t parameter may be passed in the lower 32 bits of a
64-bit register with the non-zero upper 32 bits.  The string/memory
functions written in assembly can only use the lower 32 bits of a
64-bit register as length or must clear the upper 32 bits before using
the full 64-bit register for length.

This patch fixes memcpy for x32.  Tested on x86-64 and x32.  On x86-64,
libc.so is the same with and without the fix.

	[BZ# 24097]
	CVE-2019-6488
	* sysdeps/x86_64/multiarch/memcpy-ssse3-back.S: Use RDX_LP for
	length.  Clear the upper 32 bits of RDX register.
	* sysdeps/x86_64/multiarch/memcpy-ssse3.S: Likewise.
	* sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S: Likewise.
	* sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S: Likewise.
	* sysdeps/x86_64/x32/Makefile (tests): Add tst-size_t-memcpy.
	* sysdeps/x86_64/x32/tst-size_t-memcpy.c: New file.
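As an illustration only, the sketch below shows the kind of x32 regression test this bug calls for; it is hypothetical and is not the sysdeps/x86_64/x32/tst-size_t-memcpy.c that the commit actually adds.  It calls memcpy through a pointer typed with a 64-bit length parameter, so that when built with -mx32 (__ILP32__) the upper 32 bits of the length register are deliberately non-zero while the real 32-bit size_t length is 16.

/* Hypothetical sketch, not the test file added by this commit.  Built
   with -mx32, the length travels in a 64-bit register whose upper 32
   bits hold junk; a correct assembly memcpy must use only the lower
   32 bits (or clear the upper ones) and copy exactly 16 bytes.  */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char src[32] = "0123456789abcdef";
  char dst[32] = { 0 };

#ifdef __ILP32__
  /* Lower 32 bits carry the real length (16); upper 32 bits are junk.  */
  uint64_t len = (0xdeadbeefULL << 32) | 16;
  /* Call through a volatile pointer with a 64-bit length parameter so
     the compiler neither truncates the argument nor inlines the call.  */
  void *(*volatile fn) (void *, const void *, uint64_t)
    = (void *(*) (void *, const void *, uint64_t)) memcpy;
  fn (dst, src, len);
#else
  /* On LP64 the whole register legitimately is the length, so there is
     nothing to exercise; keep the program runnable with a plain copy.  */
  memcpy (dst, src, 16);
#endif

  puts (memcmp (dst, src, 16) == 0 ? "OK" : "FAIL");
  return 0;
}

Before the fix, an x32 build of a test like this would attempt to copy roughly 0xdeadbeef00000010 bytes and crash; with the fix, the affected implementations either operate on the 32-bit register selected by RDX_LP or zero-extend the length first, so only 16 bytes are copied and the program prints OK.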
Diffstat (limited to 'sysdeps/x86_64/multiarch')
-rw-r--r--   sysdeps/x86_64/multiarch/memcpy-ssse3-back.S               17
-rw-r--r--   sysdeps/x86_64/multiarch/memcpy-ssse3.S                    17
-rw-r--r--   sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S    16
-rw-r--r--   sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S      54
4 files changed, 63 insertions(+), 41 deletions(-)
diff --git a/sysdeps/x86_64/multiarch/memcpy-ssse3-back.S b/sysdeps/x86_64/multiarch/memcpy-ssse3-back.S
index bcfb21e..b9188cb 100644
--- a/sysdeps/x86_64/multiarch/memcpy-ssse3-back.S
+++ b/sysdeps/x86_64/multiarch/memcpy-ssse3-back.S
@@ -45,28 +45,33 @@
.section .text.ssse3,"ax",@progbits
#if !defined USE_AS_MEMPCPY && !defined USE_AS_MEMMOVE
ENTRY (MEMPCPY_CHK)
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (MEMPCPY_CHK)
ENTRY (MEMPCPY)
- movq %rdi, %rax
- addq %rdx, %rax
+ mov %RDI_LP, %RAX_LP
+ add %RDX_LP, %RAX_LP
jmp L(start)
END (MEMPCPY)
#endif
#if !defined USE_AS_BCOPY
ENTRY (MEMCPY_CHK)
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (MEMCPY_CHK)
#endif
ENTRY (MEMCPY)
- mov %rdi, %rax
+ mov %RDI_LP, %RAX_LP
#ifdef USE_AS_MEMPCPY
- add %rdx, %rax
+ add %RDX_LP, %RAX_LP
+#endif
+
+#ifdef __ILP32__
+ /* Clear the upper 32 bits. */
+ mov %edx, %edx
#endif
#ifdef USE_AS_MEMMOVE
diff --git a/sysdeps/x86_64/multiarch/memcpy-ssse3.S b/sysdeps/x86_64/multiarch/memcpy-ssse3.S
index 7d74dbd..8f68315 100644
--- a/sysdeps/x86_64/multiarch/memcpy-ssse3.S
+++ b/sysdeps/x86_64/multiarch/memcpy-ssse3.S
@@ -45,28 +45,33 @@
.section .text.ssse3,"ax",@progbits
#if !defined USE_AS_MEMPCPY && !defined USE_AS_MEMMOVE
ENTRY (MEMPCPY_CHK)
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (MEMPCPY_CHK)
ENTRY (MEMPCPY)
- movq %rdi, %rax
- addq %rdx, %rax
+ mov %RDI_LP, %RAX_LP
+ add %RDX_LP, %RAX_LP
jmp L(start)
END (MEMPCPY)
#endif
#if !defined USE_AS_BCOPY
ENTRY (MEMCPY_CHK)
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (MEMCPY_CHK)
#endif
ENTRY (MEMCPY)
- mov %rdi, %rax
+ mov %RDI_LP, %RAX_LP
#ifdef USE_AS_MEMPCPY
- add %rdx, %rax
+ add %RDX_LP, %RAX_LP
+#endif
+
+#ifdef __ILP32__
+ /* Clear the upper 32 bits. */
+ mov %edx, %edx
#endif
#ifdef USE_AS_MEMMOVE
diff --git a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
index b39324d..27df2ba 100644
--- a/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
+++ b/sysdeps/x86_64/multiarch/memmove-avx512-no-vzeroupper.S
@@ -24,27 +24,31 @@
.section .text.avx512,"ax",@progbits
ENTRY (__mempcpy_chk_avx512_no_vzeroupper)
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (__mempcpy_chk_avx512_no_vzeroupper)
ENTRY (__mempcpy_avx512_no_vzeroupper)
- movq %rdi, %rax
- addq %rdx, %rax
+ mov %RDI_LP, %RAX_LP
+ add %RDX_LP, %RAX_LP
jmp L(start)
END (__mempcpy_avx512_no_vzeroupper)
ENTRY (__memmove_chk_avx512_no_vzeroupper)
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (__memmove_chk_avx512_no_vzeroupper)
ENTRY (__memmove_avx512_no_vzeroupper)
- mov %rdi, %rax
+ mov %RDI_LP, %RAX_LP
# ifdef USE_AS_MEMPCPY
- add %rdx, %rax
+ add %RDX_LP, %RAX_LP
# endif
L(start):
+# ifdef __ILP32__
+ /* Clear the upper 32 bits. */
+ mov %edx, %edx
+# endif
lea (%rsi, %rdx), %rcx
lea (%rdi, %rdx), %r9
cmp $512, %rdx
diff --git a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
index 6e4959f..2e9d86b 100644
--- a/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
+++ b/sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S
@@ -106,20 +106,20 @@
.section SECTION(.text),"ax",@progbits
#if defined SHARED && IS_IN (libc)
ENTRY (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned))
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned))
#endif
ENTRY (MEMPCPY_SYMBOL (__mempcpy, unaligned))
- movq %rdi, %rax
- addq %rdx, %rax
+ mov %RDI_LP, %RAX_LP
+ add %RDX_LP, %RAX_LP
jmp L(start)
END (MEMPCPY_SYMBOL (__mempcpy, unaligned))
#if defined SHARED && IS_IN (libc)
ENTRY (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned))
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned))
#endif
@@ -127,9 +127,13 @@ END (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned))
ENTRY (MEMMOVE_SYMBOL (__memmove, unaligned))
movq %rdi, %rax
L(start):
- cmpq $VEC_SIZE, %rdx
+# ifdef __ILP32__
+ /* Clear the upper 32 bits. */
+ movl %edx, %edx
+# endif
+ cmp $VEC_SIZE, %RDX_LP
jb L(less_vec)
- cmpq $(VEC_SIZE * 2), %rdx
+ cmp $(VEC_SIZE * 2), %RDX_LP
ja L(more_2x_vec)
#if !defined USE_MULTIARCH || !IS_IN (libc)
L(last_2x_vec):
@@ -149,38 +153,38 @@ END (MEMMOVE_SYMBOL (__memmove, unaligned))
# if VEC_SIZE == 16
ENTRY (__mempcpy_chk_erms)
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (__mempcpy_chk_erms)
/* Only used to measure performance of REP MOVSB. */
ENTRY (__mempcpy_erms)
- movq %rdi, %rax
+ mov %RDI_LP, %RAX_LP
/* Skip zero length. */
- testq %rdx, %rdx
+ test %RDX_LP, %RDX_LP
jz 2f
- addq %rdx, %rax
+ add %RDX_LP, %RAX_LP
jmp L(start_movsb)
END (__mempcpy_erms)
ENTRY (__memmove_chk_erms)
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (__memmove_chk_erms)
ENTRY (__memmove_erms)
movq %rdi, %rax
/* Skip zero length. */
- testq %rdx, %rdx
+ test %RDX_LP, %RDX_LP
jz 2f
L(start_movsb):
- movq %rdx, %rcx
- cmpq %rsi, %rdi
+ mov %RDX_LP, %RCX_LP
+ cmp %RSI_LP, %RDI_LP
jb 1f
/* Source == destination is less common. */
je 2f
- leaq (%rsi,%rcx), %rdx
- cmpq %rdx, %rdi
+ lea (%rsi,%rcx), %RDX_LP
+ cmp %RDX_LP, %RDI_LP
jb L(movsb_backward)
1:
rep movsb
@@ -200,20 +204,20 @@ strong_alias (__memmove_chk_erms, __memcpy_chk_erms)
# ifdef SHARED
ENTRY (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned_erms))
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (MEMMOVE_CHK_SYMBOL (__mempcpy_chk, unaligned_erms))
# endif
ENTRY (MEMMOVE_SYMBOL (__mempcpy, unaligned_erms))
- movq %rdi, %rax
- addq %rdx, %rax
+ mov %RDI_LP, %RAX_LP
+ add %RDX_LP, %RAX_LP
jmp L(start_erms)
END (MEMMOVE_SYMBOL (__mempcpy, unaligned_erms))
# ifdef SHARED
ENTRY (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned_erms))
- cmpq %rdx, %rcx
+ cmp %RDX_LP, %RCX_LP
jb HIDDEN_JUMPTARGET (__chk_fail)
END (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned_erms))
# endif
@@ -221,9 +225,13 @@ END (MEMMOVE_CHK_SYMBOL (__memmove_chk, unaligned_erms))
ENTRY (MEMMOVE_SYMBOL (__memmove, unaligned_erms))
movq %rdi, %rax
L(start_erms):
- cmpq $VEC_SIZE, %rdx
+# ifdef __ILP32__
+ /* Clear the upper 32 bits. */
+ movl %edx, %edx
+# endif
+ cmp $VEC_SIZE, %RDX_LP
jb L(less_vec)
- cmpq $(VEC_SIZE * 2), %rdx
+ cmp $(VEC_SIZE * 2), %RDX_LP
ja L(movsb_more_2x_vec)
L(last_2x_vec):
/* From VEC and to 2 * VEC. No branch when size == VEC_SIZE. */
@@ -250,7 +258,7 @@ L(movsb):
# endif
jb L(more_8x_vec_backward)
1:
- movq %rdx, %rcx
+ mov %RDX_LP, %RCX_LP
rep movsb
L(nop):
ret