author	Wilco Dijkstra <wilco.dijkstra@arm.com>	2024-03-21 16:48:33 +0000
committer	Wilco Dijkstra <wilco.dijkstra@arm.com>	2024-04-08 17:08:03 +0100
commit	24de733967029fd902c34073d2ab25b900887352 (patch)
tree	b8c93346bef5294924553a48ecbcb86896d4f534
parent	b77e357bf9bf9972fc33bfbde4269d911eb9b682 (diff)
AArch64: Check kernel version for SVE ifuncs
Old Linux kernels disable SVE after every system call. Calling the SVE-optimized memcpy afterwards will then cause a trap to reenable SVE. As a result, applications with a high use of syscalls may run slower with the SVE memcpy. This is true for kernels between 4.15.0 and before 6.2.0, except for 5.14.0 which was patched. Avoid this by checking the kernel version and selecting the SVE ifunc on modern kernels.

Parse the kernel version reported by uname() into a 24-bit kernel.major.minor value without calling any library functions. If uname() is not supported or if the version format is not recognized, assume the kernel is modern.

Tested-by: Florian Weimer <fweimer@redhat.com>
Reviewed-by: Szabolcs Nagy <szabolcs.nagy@arm.com>
(cherry picked from commit 2e94e2f5d2bf2de124c8ad7da85463355e54ccb2)
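For reference, below is a minimal standalone sketch (not part of the patch) of the version-parsing approach described above. The helper name parse_kernel_version and the main driver are illustrative only; the encoding packs kernel.major.minor into one byte each, so 4.15.0 becomes 0x040f00, 5.14.0 becomes 0x050e00 and 6.2.0 becomes 0x060200.

/* Illustrative sketch only: parse the uname() release string
   (e.g. "6.2.0-39-generic") into the 24-bit encoding used by the
   patch, without calling any library functions for the parsing.  */
#include <stdio.h>
#include <stdbool.h>
#include <sys/utsname.h>

static int
parse_kernel_version (const char *p)
{
  int kernel = 0;

  /* Pack up to three dot-separated numbers into one byte each.  */
  for (int shift = 16; shift >= 0; shift -= 8)
    {
      int val = 0;
      for (; *p >= '0' && *p <= '9'; p++)
	val = val * 10 + *p - '0';
      kernel |= (val & 255) << shift;
      if (*p++ != '.')
	break;
    }
  return kernel;
}

int
main (void)
{
  struct utsname buf;
  if (uname (&buf) < 0)
    return 1;

  int kernel = parse_kernel_version (buf.release);

  /* Mirror the patch's policy: prefer SVE ifuncs on 6.2.0 and later,
     on the patched 5.14.0, and on anything older than 4.15.0.  */
  bool prefer_sve = kernel >= 0x060200 || kernel == 0x050e00
		    || kernel < 0x040f00;

  printf ("release=%s kernel=0x%06x prefer_sve_ifuncs=%d\n",
	  buf.release, kernel, prefer_sve);
  return 0;
}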
-rw-r--r--	sysdeps/aarch64/multiarch/init-arch.h	2
-rw-r--r--	sysdeps/aarch64/multiarch/memcpy.c	2
-rw-r--r--	sysdeps/aarch64/multiarch/memmove.c	2
-rw-r--r--	sysdeps/unix/sysv/linux/aarch64/cpu-features.c	48
-rw-r--r--	sysdeps/unix/sysv/linux/aarch64/cpu-features.h	1
5 files changed, 53 insertions, 2 deletions
diff --git a/sysdeps/aarch64/multiarch/init-arch.h b/sysdeps/aarch64/multiarch/init-arch.h
index 5da1656..5b2cf5c 100644
--- a/sysdeps/aarch64/multiarch/init-arch.h
+++ b/sysdeps/aarch64/multiarch/init-arch.h
@@ -36,5 +36,7 @@
MTE_ENABLED (); \
bool __attribute__((unused)) sve = \
GLRO(dl_aarch64_cpu_features).sve; \
+ bool __attribute__((unused)) prefer_sve_ifuncs = \
+ GLRO(dl_aarch64_cpu_features).prefer_sve_ifuncs; \
bool __attribute__((unused)) mops = \
GLRO(dl_aarch64_cpu_features).mops;
diff --git a/sysdeps/aarch64/multiarch/memcpy.c b/sysdeps/aarch64/multiarch/memcpy.c
index d1cf5be..3de66c1 100644
--- a/sysdeps/aarch64/multiarch/memcpy.c
+++ b/sysdeps/aarch64/multiarch/memcpy.c
@@ -47,7 +47,7 @@ select_memcpy_ifunc (void)
{
if (IS_A64FX (midr))
return __memcpy_a64fx;
- return __memcpy_sve;
+ return prefer_sve_ifuncs ? __memcpy_sve : __memcpy_generic;
}
if (IS_THUNDERX (midr))
diff --git a/sysdeps/aarch64/multiarch/memmove.c b/sysdeps/aarch64/multiarch/memmove.c
index 90729e0..fdcf418 100644
--- a/sysdeps/aarch64/multiarch/memmove.c
+++ b/sysdeps/aarch64/multiarch/memmove.c
@@ -47,7 +47,7 @@ select_memmove_ifunc (void)
{
if (IS_A64FX (midr))
return __memmove_a64fx;
- return __memmove_sve;
+ return prefer_sve_ifuncs ? __memmove_sve : __memmove_generic;
}
if (IS_THUNDERX (midr))
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
index 6ee1cb4..2543128 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.c
@@ -20,6 +20,7 @@
#include <sys/auxv.h>
#include <elf/dl-hwcaps.h>
#include <sys/prctl.h>
+#include <sys/utsname.h>
#define DCZID_DZP_MASK (1 << 4)
#define DCZID_BS_MASK (0xf)
@@ -59,6 +60,46 @@ get_midr_from_mcpu (const char *mcpu)
}
#endif
+#if __LINUX_KERNEL_VERSION < 0x060200
+
+/* Return true if we prefer using SVE in string ifuncs. Old kernels disable
+ SVE after every system call which results in unnecessary traps if memcpy
+ uses SVE. This is true for kernels between 4.15.0 and before 6.2.0, except
+ for 5.14.0 which was patched. For these versions return false to avoid using
+ SVE ifuncs.
+ Parse the kernel version into a 24-bit kernel.major.minor value without
+ calling any library functions. If uname() is not supported or if the version
+ format is not recognized, assume the kernel is modern and return true. */
+
+static inline bool
+prefer_sve_ifuncs (void)
+{
+ struct utsname buf;
+ const char *p = &buf.release[0];
+ int kernel = 0;
+ int val;
+
+ if (__uname (&buf) < 0)
+ return true;
+
+ for (int shift = 16; shift >= 0; shift -= 8)
+ {
+ for (val = 0; *p >= '0' && *p <= '9'; p++)
+ val = val * 10 + *p - '0';
+ kernel |= (val & 255) << shift;
+ if (*p++ != '.')
+ break;
+ }
+
+ if (kernel >= 0x060200 || kernel == 0x050e00)
+ return true;
+ if (kernel >= 0x040f00)
+ return false;
+ return true;
+}
+
+#endif
+
static inline void
init_cpu_features (struct cpu_features *cpu_features)
{
@@ -125,6 +166,13 @@ init_cpu_features (struct cpu_features *cpu_features)
/* Check if SVE is supported. */
cpu_features->sve = GLRO (dl_hwcap) & HWCAP_SVE;
+ cpu_features->prefer_sve_ifuncs = cpu_features->sve;
+
+#if __LINUX_KERNEL_VERSION < 0x060200
+ if (cpu_features->sve)
+ cpu_features->prefer_sve_ifuncs = prefer_sve_ifuncs ();
+#endif
+
/* Check if MOPS is supported. */
cpu_features->mops = GLRO (dl_hwcap2) & HWCAP2_MOPS;
}
diff --git a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
index b4bd43a..d51597b 100644
--- a/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
+++ b/sysdeps/unix/sysv/linux/aarch64/cpu-features.h
@@ -71,6 +71,7 @@ struct cpu_features
/* Currently, the GLIBC memory tagging tunable only defines 8 bits. */
uint8_t mte_state;
bool sve;
+ bool prefer_sve_ifuncs;
bool mops;
};