author    Wilco Dijkstra <wilco.dijkstra@arm.com>    2023-02-01 18:45:19 +0000
committer Wilco Dijkstra <wilco.dijkstra@arm.com>    2023-02-06 16:15:34 +0000
commit    d2d3f3720ce627a4fe154d8dd14db716a32bcc6e (patch)
tree      e1220882ec6dd343a88486c52a22d7cacf244a2a
parent    c980549cc6a1c03c23cc2fe3e7b0fe626a0364b0 (diff)
AArch64: Improve SVE memcpy and memmove
Improve SVE memcpy by copying 2 vectors if the size is small enough.
This improves performance of random memcpy by ~9% on Neoverse V1, and
33-64 byte copies are ~16% faster.

Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
 sysdeps/aarch64/multiarch/memcpy_sve.S | 34 ++++++++++++++--------------------
 1 file changed, 14 insertions(+), 20 deletions(-)
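For readers less at home in SVE assembly, here is a minimal C sketch of the
new small-copy path, written with the ACLE intrinsics from arm_sve.h. It is
not the glibc implementation, and the function name small_copy_sketch is
hypothetical; it only mirrors the logic of the patched sequence below:
cntb/whilelo build two predicates that together cover any count up to 2*VL,
so the second load/store pair safely degenerates to a no-op when count <= VL.
Compile with e.g. -march=armv8-a+sve.

#include <arm_sve.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical sketch of the patched small-copy path; not glibc code.
   Two WHILELO predicates cover any count <= 2*VL.  When count <= VL,
   p1 is all-false and the second load/store pair does nothing.  */
static void
small_copy_sketch (uint8_t *dst, const uint8_t *src, size_t count)
{
  uint64_t vl = svcntb ();                /* cntb vlen                    */
  if (count > 2 * vl)                     /* cmp count, vlen, lsl 1       */
    return;                               /* larger sizes take the medium/
                                             large paths in the real code */
  svbool_t p0 = svwhilelt_b8_u64 (0, count);   /* whilelo p0.b, xzr, count */
  svbool_t p1 = svwhilelt_b8_u64 (vl, count);  /* whilelo p1.b, vlen, count */
  svuint8_t z0 = svld1_u8 (p0, src);           /* ld1b z0.b, [src, 0, mul vl] */
  svuint8_t z1 = svld1_u8 (p1, src + vl);      /* ld1b z1.b, [src, 1, mul vl] */
  svst1_u8 (p0, dst, z0);                      /* st1b z0.b, [dstin, 0, mul vl] */
  svst1_u8 (p1, dst + vl, z1);                 /* st1b z1.b, [dstin, 1, mul vl] */
}

Because every load and store is predicated, the sequence is correct for any
count up to 2*VL without branching on the vector length; this is what lets
the patch drop the old tbnz/L(vlen128) special case for 128-bit vectors.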
diff --git a/sysdeps/aarch64/multiarch/memcpy_sve.S b/sysdeps/aarch64/multiarch/memcpy_sve.S
index f4dc214..d11be6a 100644
--- a/sysdeps/aarch64/multiarch/memcpy_sve.S
+++ b/sysdeps/aarch64/multiarch/memcpy_sve.S
@@ -67,14 +67,15 @@ ENTRY (__memcpy_sve)
cmp count, 128
b.hi L(copy_long)
- cmp count, 32
+ cntb vlen
+ cmp count, vlen, lsl 1
b.hi L(copy32_128)
-
whilelo p0.b, xzr, count
- cntb vlen
- tbnz vlen, 4, L(vlen128)
- ld1b z0.b, p0/z, [src]
- st1b z0.b, p0, [dstin]
+ whilelo p1.b, vlen, count
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p1/z, [src, 1, mul vl]
+ st1b z0.b, p0, [dstin, 0, mul vl]
+ st1b z1.b, p1, [dstin, 1, mul vl]
ret
/* Medium copies: 33..128 bytes. */
@@ -102,14 +103,6 @@ L(copy96):
stp C_q, D_q, [dstend, -32]
ret
-L(vlen128):
- whilelo p1.b, vlen, count
- ld1b z0.b, p0/z, [src, 0, mul vl]
- ld1b z1.b, p1/z, [src, 1, mul vl]
- st1b z0.b, p0, [dstin, 0, mul vl]
- st1b z1.b, p1, [dstin, 1, mul vl]
- ret
-
.p2align 4
/* Copy more than 128 bytes. */
L(copy_long):
@@ -158,14 +151,15 @@ ENTRY (__memmove_sve)
cmp count, 128
b.hi L(move_long)
- cmp count, 32
+ cntb vlen
+ cmp count, vlen, lsl 1
b.hi L(copy32_128)
-
whilelo p0.b, xzr, count
- cntb vlen
- tbnz vlen, 4, L(vlen128)
- ld1b z0.b, p0/z, [src]
- st1b z0.b, p0, [dstin]
+ whilelo p1.b, vlen, count
+ ld1b z0.b, p0/z, [src, 0, mul vl]
+ ld1b z1.b, p1/z, [src, 1, mul vl]
+ st1b z0.b, p0, [dstin, 0, mul vl]
+ st1b z1.b, p1, [dstin, 1, mul vl]
ret
.p2align 4
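As a quick sanity check of the sketch above (hypothetical, not part of the
commit), it can be compared against the libc memcpy for every size the fast
path covers, on an SVE-capable machine with the sketch in scope:

#include <assert.h>
#include <string.h>

int
main (void)
{
  uint8_t src[512], dst[512], ref[512];
  for (size_t i = 0; i < sizeof src; i++)
    src[i] = (uint8_t) i;
  /* Exercise all sizes the two-vector path handles, including 0.  */
  for (size_t n = 0; n <= 2 * svcntb (); n++)
    {
      memset (dst, 0xaa, sizeof dst);
      memset (ref, 0xaa, sizeof ref);
      small_copy_sketch (dst, src, n);
      memcpy (ref, src, n);
      assert (memcmp (dst, ref, sizeof dst) == 0);
    }
  return 0;
}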