author     Alexey Lapshin <alexey.lapshin@espressif.com>  2025-01-27 10:45:55 +0000
committer  Corinna Vinschen <corinna@vinschen.de>         2025-02-10 15:30:46 +0100
commit     ab827c1a6c2cb90cc2ded88a8f443a2f09c3bff7 (patch)
tree       05e418a8a064f363628fa0abfb2af5f07fbb80f5 /newlib/libc
parent     bc2723e98929a327d0a281407bd68cfe8420c16c (diff)
newlib: mem[p]cpy/memmove improve performance for optimized versions
This change improves performance for memory blocks with sizes in the
range [4..15]. Performance measurements were made on a RISC-V machine
(memset):

size  4, CPU cycles change: 50  -> 37
size  5, CPU cycles change: 57  -> 40
size  6, CPU cycles change: 64  -> 47
size  7, CPU cycles change: 71  -> 54
size  8, CPU cycles change: 78  -> 44
size  9, CPU cycles change: 85  -> 47
size 10, CPU cycles change: 92  -> 54
size 11, CPU cycles change: 99  -> 61
size 12, CPU cycles change: 106 -> 51
size 13, CPU cycles change: 113 -> 54
size 14, CPU cycles change: 120 -> 61
size 15, CPU cycles change: 127 -> 68
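For context, the guard changed below compares the copy length against one
of two block-size thresholds. A minimal sketch of those macros, assuming
the conventional newlib definitions in terms of sizeof (long) (an
illustration, not the verbatim newlib source):

    /* Bytes moved per iteration of the 4x-unrolled word-copy loop.  */
    #define BIGBLOCKSIZE    (sizeof (long) << 2)
    /* Bytes moved per iteration of the single-word copy loop.  */
    #define LITTLEBLOCKSIZE (sizeof (long))

    /* Punt thresholds for the two loops.  */
    #define TOO_SMALL_BIG_BLOCK(LEN)    ((LEN) < BIGBLOCKSIZE)
    #define TOO_SMALL_LITTLE_BLOCK(LEN) ((LEN) < LITTLEBLOCKSIZE)

    /* Nonzero if either X or Y is not aligned on a "long" boundary.  */
    #define UNALIGNED_X_Y(X, Y) \
      (((long)(X) & (sizeof (long) - 1)) | ((long)(Y) & (sizeof (long) - 1)))

With 4-byte longs this makes BIGBLOCKSIZE 16 and LITTLEBLOCKSIZE 4: the old
guard sent every copy shorter than 16 bytes to the byte-copy loop, while the
new guard keeps aligned copies of 4 bytes or more on the optimized path,
which is exactly the [4..15] range measured above.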
Diffstat (limited to 'newlib/libc')
-rw-r--r--  newlib/libc/string/memcpy.c   2
-rw-r--r--  newlib/libc/string/memmove.c  2
-rw-r--r--  newlib/libc/string/mempcpy.c  2
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/newlib/libc/string/memcpy.c b/newlib/libc/string/memcpy.c
index 1bbd4e0..e680c44 100644
--- a/newlib/libc/string/memcpy.c
+++ b/newlib/libc/string/memcpy.c
@@ -57,7 +57,7 @@ memcpy (void *__restrict dst0,
/* If the size is small, or either SRC or DST is unaligned,
then punt into the byte copy loop. This should be rare. */
- if (!TOO_SMALL_BIG_BLOCK(len0) && !UNALIGNED_X_Y(src, dst))
+ if (!TOO_SMALL_LITTLE_BLOCK(len0) && !UNALIGNED_X_Y(src, dst))
{
aligned_dst = (long*)dst;
aligned_src = (long*)src;
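After this guard, the generic implementation copies in progressively smaller
units. An abridged sketch of the body that follows (assuming the structure
of newlib's generic memcpy, not quoted verbatim from the file):

      /* Copy 4X long words at a time if possible.  */
      while (!TOO_SMALL_BIG_BLOCK (len0))
        {
          *aligned_dst++ = *aligned_src++;
          *aligned_dst++ = *aligned_src++;
          *aligned_dst++ = *aligned_src++;
          *aligned_dst++ = *aligned_src++;
          len0 -= BIGBLOCKSIZE;
        }

      /* Copy one long word at a time if possible.  */
      while (!TOO_SMALL_LITTLE_BLOCK (len0))
        {
          *aligned_dst++ = *aligned_src++;
          len0 -= LITTLEBLOCKSIZE;
        }

      /* Pick up any residual with a byte copier.  */

Relaxing the entry guard therefore lets lengths in
[LITTLEBLOCKSIZE, BIGBLOCKSIZE) skip the 4x-unrolled loop but still use the
single-word loop instead of falling all the way back to byte-by-byte copying.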
diff --git a/newlib/libc/string/memmove.c b/newlib/libc/string/memmove.c
index a82744c..4c5ec6f 100644
--- a/newlib/libc/string/memmove.c
+++ b/newlib/libc/string/memmove.c
@@ -85,7 +85,7 @@ memmove (void *dst_void,
/* Use optimizing algorithm for a non-destructive copy to closely
match memcpy. If the size is small or either SRC or DST is unaligned,
then punt into the byte copy loop. This should be rare. */
- if (!TOO_SMALL_BIG_BLOCK(length) && !UNALIGNED_X_Y(src, dst))
+ if (!TOO_SMALL_LITTLE_BLOCK(length) && !UNALIGNED_X_Y(src, dst))
{
aligned_dst = (long*)dst;
aligned_src = (long*)src;
diff --git a/newlib/libc/string/mempcpy.c b/newlib/libc/string/mempcpy.c
index 06e97de..5618921 100644
--- a/newlib/libc/string/mempcpy.c
+++ b/newlib/libc/string/mempcpy.c
@@ -53,7 +53,7 @@ mempcpy (void *dst0,
/* If the size is small, or either SRC or DST is unaligned,
then punt into the byte copy loop. This should be rare. */
- if (!TOO_SMALL_BIG_BLOCK(len0) && !UNALIGNED_X_Y(src, dst))
+ if (!TOO_SMALL_LITTLE_BLOCK(len0) && !UNALIGNED_X_Y(src, dst))
{
aligned_dst = (long*)dst;
aligned_src = (long*)src;
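To reproduce numbers like those in the commit message, a minimal,
hypothetical cycle-count harness for RV64 (the rdcycle usage, buffer sizes,
and build flags are assumptions, not part of this patch; build with
-fno-builtin so the library memcpy is actually called):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Read the RISC-V cycle CSR (RV64; assumes user-mode access
       to rdcycle is permitted on the target).  */
    static inline uint64_t rdcycle (void)
    {
      uint64_t c;
      __asm__ volatile ("rdcycle %0" : "=r" (c));
      return c;
    }

    int main (void)
    {
      static char src[16], dst[16];  /* statically allocated, long-aligned */
      for (size_t len = 4; len <= 15; len++)
        {
          uint64_t start = rdcycle ();
          memcpy (dst, src, len);
          uint64_t end = rdcycle ();
          printf ("size %zu, CPU cycles: %llu\n",
                  len, (unsigned long long) (end - start));
        }
      return 0;
    }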