author    Naohiro Tamura <naohirot@jp.fujitsu.com>  2021-05-27 07:42:35 +0000
committer Szabolcs Nagy <szabolcs.nagy@arm.com>     2021-05-27 09:47:53 +0100
commit    fa527f345cbbe852ec085932fbea979956c195b5 (patch)
tree      6d84a3c41c4ed9ac4ffce5c33e7448dd70075e87 /sysdeps/aarch64/multiarch/memmove.c
parent    f12ec02f5389a443d892241c486d87b3c5940ff6 (diff)
aarch64: Added optimized memcpy and memmove for A64FX
This patch optimizes the performance of memcpy/memmove for A64FX [1],
which implements ARMv8-A SVE and has a 64 KB L1 cache per core and an
8 MB L2 cache per NUMA node.

The optimization makes use of the Scalable Vector Extension registers
with several techniques, such as loop unrolling, memory access
alignment, cache zero fill, and software pipelining.

The SVE assembler code for memcpy/memmove is implemented as vector
length agnostic code, so in principle it can run on any SoC that
supports the ARMv8-A SVE standard.

We confirmed that all test cases pass by running 'make check' and
'make xcheck', not only on A64FX but also on ThunderX2.  We also
confirmed by running 'make bench' that the performance with 512-bit
SVE vector registers is roughly 4 times better than with Advanced
SIMD 128-bit registers and 8 times better than with scalar 64-bit
registers.

[1] https://github.com/fujitsu/A64FX

Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
Reviewed-by: Szabolcs Nagy <Szabolcs.Nagy@arm.com>
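To see what "vector length agnostic" means in practice, below is a
minimal sketch of a forward SVE copy loop using ACLE intrinsics.  The
function name sve_copy_sketch is hypothetical; the commit itself adds
hand-written SVE assembly that layers unrolling, alignment handling,
cache zero fill, and software pipelining on top of this basic
pattern.

    /* Compile with e.g. -march=armv8-a+sve.  Illustrative sketch only.  */
    #include <arm_sve.h>
    #include <stddef.h>
    #include <stdint.h>

    static void *
    sve_copy_sketch (void *restrict dst, const void *restrict src, size_t n)
    {
      uint8_t *d = dst;
      const uint8_t *s = src;
      /* svcntb () is the hardware vector length in bytes (64 on A64FX,
         but possibly 16 or 32 elsewhere), so the same binary scales
         across SVE implementations.  */
      uint64_t vl = svcntb ();

      for (uint64_t i = 0; i < n; i += vl)
        {
          /* whilelt yields an all-true predicate for full vectors and a
             partial one for the final iteration, so no scalar tail loop
             is needed.  */
          svbool_t pg = svwhilelt_b8 (i, (uint64_t) n);
          svst1_u8 (pg, d + i, svld1_u8 (pg, s + i));
        }
      return dst;
    }

A memmove additionally needs a backward variant of this loop when the
regions overlap with dst > src; the commit's assembly handles that
case as well.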
Diffstat (limited to 'sysdeps/aarch64/multiarch/memmove.c')
-rw-r--r--  sysdeps/aarch64/multiarch/memmove.c | 18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/sysdeps/aarch64/multiarch/memmove.c b/sysdeps/aarch64/multiarch/memmove.c
index 12d7781..d0adefc 100644
--- a/sysdeps/aarch64/multiarch/memmove.c
+++ b/sysdeps/aarch64/multiarch/memmove.c
@@ -33,6 +33,9 @@ extern __typeof (__redirect_memmove) __memmove_simd attribute_hidden;
 extern __typeof (__redirect_memmove) __memmove_thunderx attribute_hidden;
 extern __typeof (__redirect_memmove) __memmove_thunderx2 attribute_hidden;
 extern __typeof (__redirect_memmove) __memmove_falkor attribute_hidden;
+# if HAVE_AARCH64_SVE_ASM
+extern __typeof (__redirect_memmove) __memmove_a64fx attribute_hidden;
+# endif

 libc_ifunc (__libc_memmove,
             (IS_THUNDERX (midr)
@@ -40,12 +43,17 @@ libc_ifunc (__libc_memmove,
              : (IS_FALKOR (midr) || IS_PHECDA (midr)
                 ? __memmove_falkor
                 : (IS_THUNDERX2 (midr) || IS_THUNDERX2PA (midr)
-                  ? __memmove_thunderx2
-                  : (IS_NEOVERSE_N1 (midr) || IS_NEOVERSE_N2 (midr)
-                     || IS_NEOVERSE_V1 (midr)
-                     ? __memmove_simd
+                   ? __memmove_thunderx2
+                   : (IS_NEOVERSE_N1 (midr) || IS_NEOVERSE_N2 (midr)
+                      || IS_NEOVERSE_V1 (midr)
+                      ? __memmove_simd
+# if HAVE_AARCH64_SVE_ASM
+                      : (IS_A64FX (midr)
+                         ? __memmove_a64fx
+                         : __memmove_generic))))));
+# else
                      : __memmove_generic)))));
-
+# endif
 # undef memmove
 strong_alias (__libc_memmove, memmove);
 #endif
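For readers unfamiliar with the dispatch mechanism above: libc_ifunc
expands to a GNU indirect function (ifunc), whose resolver runs once
at relocation time and picks the implementation every later call
jumps to.  Below is a stand-alone sketch of the same pattern with
hypothetical names; the real dispatcher keys off the midr value (the
CPU identification register) read by glibc's INIT_ARCH machinery, as
the nested conditional in the diff shows.

    /* Requires an ELF toolchain with ifunc support (GCC/binutils on
       GNU/Linux).  Hypothetical illustration, not glibc code.  */
    #include <stdio.h>

    /* Two interchangeable implementations.  */
    static void impl_generic (void) { puts ("generic"); }
    static void impl_a64fx (void) { puts ("a64fx"); }

    /* The resolver runs during relocation, before main, and returns
       the function the ifunc symbol should resolve to from then on.  */
    static void (*resolve_demo (void)) (void)
    {
      int cpu_is_a64fx = 0;      /* stand-in for IS_A64FX (midr) */
      return cpu_is_a64fx ? impl_a64fx : impl_generic;
    }

    /* All calls to demo () dispatch through the resolved pointer.  */
    void demo (void) __attribute__ ((ifunc ("resolve_demo")));

    int
    main (void)
    {
      demo ();                   /* prints "generic" here */
      return 0;
    }

The benefit over a plain function pointer is that the selection cost
is paid once at load time, and every call site thereafter is a direct
call through the resolved PLT entry.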