author    Amrita H S <amritahs@linux.ibm.com>    2024-05-06 09:01:29 -0500
committer Peter Bergner <bergner@linux.ibm.com>  2024-05-06 09:01:29 -0500
commit    23f0d81608d0ca6379894ef81670cf30af7fd081 (patch)
tree      2c6ec46bf4d7bfb00c46f215a5735cb49db65e31
parent    ea73eb5f581ef5931fd67005aa0c526ba43366c9 (diff)
powerpc: Optimized strncmp for power10
This patch is based on __strcmp_power10.

Improvements from __strncmp_power9:

1. Uses new POWER10 instructions
   - This code uses lxvp to decrease contention on load
     by loading 32 bytes per instruction.

2. Performance implication
   - This version has around 38% better performance on average.
   - A minor performance regression is seen for a few small sizes
     and specific combinations of alignments.

Signed-off-by: Amrita H S <amritahs@linux.ibm.com>
Reviewed-by: Peter Bergner <bergner@linux.ibm.com>
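For reference, and not part of the patch: the semantics every strncmp
variant must preserve, as a minimal portable C sketch (the name
strncmp_ref is illustrative). Comparison is done on unsigned char values
and stops at the first difference, at a NUL, or after n bytes.

#include <stddef.h>

int
strncmp_ref (const char *s1, const char *s2, size_t n)
{
  const unsigned char *p1 = (const unsigned char *) s1;
  const unsigned char *p2 = (const unsigned char *) s2;

  for (; n > 0; n--, p1++, p2++)
    {
      if (*p1 != *p2)
        return *p1 - *p2;
      if (*p1 == '\0')
        return 0;
    }
  return 0;
}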
 sysdeps/powerpc/powerpc64/le/power10/strncmp.S        | 271 ++++++++++++++++
 sysdeps/powerpc/powerpc64/multiarch/Makefile          |   2 +-
 sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c |   3 +
 sysdeps/powerpc/powerpc64/multiarch/strncmp-power10.S |  25 ++
 sysdeps/powerpc/powerpc64/multiarch/strncmp.c         |   4 +
 5 files changed, 304 insertions(+), 1 deletion(-)
diff --git a/sysdeps/powerpc/powerpc64/le/power10/strncmp.S b/sysdeps/powerpc/powerpc64/le/power10/strncmp.S
new file mode 100644
index 0000000..d4ba76a
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/le/power10/strncmp.S
@@ -0,0 +1,271 @@
+/* Optimized strncmp implementation for PowerPC64/POWER10.
+ Copyright (C) 2024 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#include <sysdep.h>
+
+/* Implements the function
+
+ int [r3] strncmp (const char *s1 [r3], const char *s2 [r4], size_t [r5] n)
+
+ The implementation uses unaligned doubleword access to avoid specialized
+ code paths depending on data alignment for the first 32 bytes, and uses
+ vectorised loops after that. */
+
+#ifndef STRNCMP
+# define STRNCMP strncmp
+#endif
+
+/* TODO: Change this to the actual lxvp instruction when the minimum
+   binutils is upgraded to 2.36 or later, which adds POWER10 support.
+   The macro below is defined for this newer instruction in order to
+   maintain compatibility with older assemblers. */
+
+#define LXVP(xtp,dq,ra) \
+ .long(((6)<<(32-6)) \
+ | ((((xtp)-32)>>1)<<(32-10)) \
+ | ((1)<<(32-11)) \
+ | ((ra)<<(32-16)) \
+ | dq)
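+/* Field layout per the macro above: primary opcode 6, the register-pair
+   selector (xtp-32)/2 with the TX bit set since the operands are VSRs
+   32-63 (i.e. vector registers), RA, and the 16B-multiple displacement
+   dq in the low bits. */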
+
+#define COMPARE_16(vreg1,vreg2,offset) \
+ lxv vreg1+32,offset(r3); \
+ lxv vreg2+32,offset(r4); \
+ vcmpnezb. v7,vreg1,vreg2; \
+ bne cr6,L(different); \
+ cmpldi cr7,r5,16; \
+ ble cr7,L(ret0); \
+ addi r5,r5,-16;
+
+#define COMPARE_32(vreg1,vreg2,offset,label1,label2) \
+ LXVP(vreg1+32,offset,r3); \
+ LXVP(vreg2+32,offset,r4); \
+ vcmpnezb. v7,vreg1+1,vreg2+1; \
+ bne cr6,L(label1); \
+ vcmpnezb. v7,vreg1,vreg2; \
+ bne cr6,L(label2); \
+ cmpldi cr7,r5,32; \
+ ble cr7,L(ret0); \
+ addi r5,r5,-32;
+
+#define TAIL_FIRST_16B(vreg1,vreg2) \
+ vctzlsbb r6,v7; \
+ cmpld cr7,r5,r6; \
+ ble cr7,L(ret0); \
+ vextubrx r5,r6,vreg1; \
+ vextubrx r4,r6,vreg2; \
+ subf r3,r4,r5; \
+ blr;
+
+#define TAIL_SECOND_16B(vreg1,vreg2) \
+ vctzlsbb r6,v7; \
+ addi r0,r6,16; \
+ cmpld cr7,r5,r0; \
+ ble cr7,L(ret0); \
+ vextubrx r5,r6,vreg1; \
+ vextubrx r4,r6,vreg2; \
+ subf r3,r4,r5; \
+ blr;
+
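+/* Compare at most len_reg bytes loaded with lxvl, which takes the length
+   in the top byte of a GPR, hence the sldi by 56. On fallthrough, both
+   pointers are advanced and the residual count is left in r5. */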
+#define CHECK_N_BYTES(reg1,reg2,len_reg) \
+ sldi r6,len_reg,56; \
+ lxvl 32+v4,reg1,r6; \
+ lxvl 32+v5,reg2,r6; \
+ add reg1,reg1,len_reg; \
+ add reg2,reg2,len_reg; \
+ vcmpnezb v7,v4,v5; \
+ vctzlsbb r6,v7; \
+ cmpld cr7,r6,len_reg; \
+ blt cr7,L(different); \
+ cmpld cr7,r5,len_reg; \
+ ble cr7,L(ret0); \
+ sub r5,r5,len_reg;
+
+ /* TODO: change this to .machine power10 when the minimum required
+ binutils allows it. */
+ .machine power9
+ENTRY_TOCLESS (STRNCMP, 4)
+ /* Check if size is 0. */
+ cmpdi cr0,r5,0
+ beq cr0,L(ret0)
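+ /* If either string starts within 16B of the end of a 4K page, take
+ the careful cross-page path; otherwise the first 16B loads are safe. */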
+ andi. r7,r3,4095
+ andi. r8,r4,4095
+ cmpldi cr0,r7,4096-16
+ cmpldi cr1,r8,4096-16
+ bgt cr0,L(crosses)
+ bgt cr1,L(crosses)
+ COMPARE_16(v4,v5,0)
+ addi r3,r3,16
+ addi r4,r4,16
+
+L(crosses):
+ andi. r7,r3,15
+ subfic r7,r7,16 /* r7(nalign1) = 16 - (str1 & 15). */
+ andi. r9,r4,15
+ subfic r8,r9,16 /* r8(nalign2) = 16 - (str2 & 15). */
+ cmpld cr7,r7,r8
+ beq cr7,L(same_aligned)
+ blt cr7,L(nalign1_min)
+
+ /* nalign2 is minimum and s2 pointer is aligned. */
+ CHECK_N_BYTES(r3,r4,r8)
+ /* Are we on the 64B hunk which crosses a page? */
+ andi. r10,r3,63 /* Determine offset into 64B hunk. */
+ andi. r8,r3,15 /* The offset into the 16B hunk. */
+ neg r7,r3
+ andi. r9,r7,15 /* Number of bytes after a 16B cross. */
+ rlwinm. r7,r7,26,0x3F /* ((r3-4096))>>6&63. */
+ beq L(compare_64_pagecross)
+ mtctr r7
+ b L(compare_64B_unaligned)
+
+ /* nalign1 is minimum and s1 pointer is aligned. */
+L(nalign1_min):
+ CHECK_N_BYTES(r3,r4,r7)
+ /* Are we on the 64B hunk which crosses a page? */
+ andi. r10,r4,63 /* Determine offset into 64B hunk. */
+ andi. r8,r4,15 /* The offset into the 16B hunk. */
+ neg r7,r4
+ andi. r9,r7,15 /* Number of bytes after a 16B cross. */
+ rlwinm. r7,r7,26,0x3F /* ((r4-4096))>>6&63. */
+ beq L(compare_64_pagecross)
+ mtctr r7
+
+ .p2align 5
+L(compare_64B_unaligned):
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ COMPARE_16(v4,v5,32)
+ COMPARE_16(v4,v5,48)
+ addi r3,r3,64
+ addi r4,r4,64
+ bdnz L(compare_64B_unaligned)
+
+ /* Cross the page boundary of the unaligned string, carefully. Only
+ for the first iteration do we have to get the count of 64B blocks to
+ be checked; from the second iteration onwards, the loop counter is
+ always 63. */
+L(compare_64_pagecross):
+ li r11, 63
+ mtctr r11
+ cmpldi r10,16
+ ble L(cross_4)
+ cmpldi r10,32
+ ble L(cross_3)
+ cmpldi r10,48
+ ble L(cross_2)
+L(cross_1):
+ CHECK_N_BYTES(r3,r4,r9)
+ CHECK_N_BYTES(r3,r4,r8)
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ COMPARE_16(v4,v5,32)
+ addi r3,r3,48
+ addi r4,r4,48
+ b L(compare_64B_unaligned)
+L(cross_2):
+ COMPARE_16(v4,v5,0)
+ addi r3,r3,16
+ addi r4,r4,16
+ CHECK_N_BYTES(r3,r4,r9)
+ CHECK_N_BYTES(r3,r4,r8)
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ addi r3,r3,32
+ addi r4,r4,32
+ b L(compare_64B_unaligned)
+L(cross_3):
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ addi r3,r3,32
+ addi r4,r4,32
+ CHECK_N_BYTES(r3,r4,r9)
+ CHECK_N_BYTES(r3,r4,r8)
+ COMPARE_16(v4,v5,0)
+ addi r3,r3,16
+ addi r4,r4,16
+ b L(compare_64B_unaligned)
+L(cross_4):
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ COMPARE_16(v4,v5,32)
+ addi r3,r3,48
+ addi r4,r4,48
+ CHECK_N_BYTES(r3,r4,r9)
+ CHECK_N_BYTES(r3,r4,r8)
+ b L(compare_64B_unaligned)
+
+L(same_aligned):
+ CHECK_N_BYTES(r3,r4,r7)
+ /* Align s1 to 32B and adjust s2 address.
+ Use lxvp only if both s1 and s2 are 32B aligned. */
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ COMPARE_16(v4,v5,32)
+ COMPARE_16(v4,v5,48)
+ addi r3,r3,64
+ addi r4,r4,64
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ addi r5,r5,32
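+ /* The two compares above decremented r5 by 32 without advancing the
+ pointers; restore it before the 32B re-alignment below subtracts r7. */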
+
+ clrldi r6,r3,59
+ subfic r7,r6,32
+ add r3,r3,r7
+ add r4,r4,r7
+ subf r5,r7,r5
+ andi. r7,r4,0x1F
+ beq cr0,L(32B_aligned_loop)
+
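+ /* s2 is not 32B aligned: stay with 16B lxv compares. The loop exits
+ through L(different) or L(ret0) inside COMPARE_16. */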
+ .p2align 5
+L(16B_aligned_loop):
+ COMPARE_16(v4,v5,0)
+ COMPARE_16(v4,v5,16)
+ COMPARE_16(v4,v5,32)
+ COMPARE_16(v4,v5,48)
+ addi r3,r3,64
+ addi r4,r4,64
+ b L(16B_aligned_loop)
+
+ /* Calculate and return the difference. */
+L(different):
+ TAIL_FIRST_16B(v4,v5)
+
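+ /* Both s1 and s2 are 32B aligned: compare 128B per iteration, loading
+ 32 bytes per lxvp into an even/odd vector register pair. */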
+ .p2align 5
+L(32B_aligned_loop):
+ COMPARE_32(v14,v16,0,tail1,tail2)
+ COMPARE_32(v18,v20,32,tail3,tail4)
+ COMPARE_32(v22,v24,64,tail5,tail6)
+ COMPARE_32(v26,v28,96,tail7,tail8)
+ addi r3,r3,128
+ addi r4,r4,128
+ b L(32B_aligned_loop)
+
+L(tail1): TAIL_FIRST_16B(v15,v17)
+L(tail2): TAIL_SECOND_16B(v14,v16)
+L(tail3): TAIL_FIRST_16B(v19,v21)
+L(tail4): TAIL_SECOND_16B(v18,v20)
+L(tail5): TAIL_FIRST_16B(v23,v25)
+L(tail6): TAIL_SECOND_16B(v22,v24)
+L(tail7): TAIL_FIRST_16B(v27,v29)
+L(tail8): TAIL_SECOND_16B(v26,v28)
+
+ .p2align 5
+L(ret0):
+ li r3,0
+ blr
+
+END(STRNCMP)
+libc_hidden_builtin_def(strncmp)
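A scalar model of what one COMPARE_16/TAIL step above decides, with the
hypothetical helper name compare_16_model: vcmpnezb flags every byte
position where the strings differ or either byte is NUL, and vctzlsbb
yields the index of the first flagged position. Return value 1 means
*result holds the answer, 0 means the length limit was reached first,
and -1 means the scan continues with the next block.

#include <stddef.h>

int
compare_16_model (const unsigned char *s1, const unsigned char *s2,
                  size_t n, int *result)
{
  for (size_t i = 0; i < 16; i++)
    /* vcmpnezb condition: bytes differ, or either byte is NUL.  */
    if (s1[i] != s2[i] || s1[i] == '\0')
      {
        /* vctzlsbb yields this i; the TAIL_*_16B code checks it against
           the remaining length before extracting the differing bytes.  */
        if (n <= i)
          return 0;               /* L(ret0): limit reached first.  */
        *result = s1[i] - s2[i];
        return 1;                 /* L(different).  */
      }
  return n <= 16 ? 0 : -1;        /* Advance to the next 16B block.  */
}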
diff --git a/sysdeps/powerpc/powerpc64/multiarch/Makefile b/sysdeps/powerpc/powerpc64/multiarch/Makefile
index a38ff46..b847c19 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/Makefile
+++ b/sysdeps/powerpc/powerpc64/multiarch/Makefile
@@ -34,7 +34,7 @@ ifneq (,$(filter %le,$(config-machine)))
sysdep_routines += memchr-power10 memcmp-power10 memcpy-power10 \
memmove-power10 memset-power10 rawmemchr-power9 \
rawmemchr-power10 strcmp-power9 strcmp-power10 \
- strncmp-power9 strcpy-power9 stpcpy-power9 \
+ strncmp-power9 strncmp-power10 strcpy-power9 stpcpy-power9 \
strlen-power9 strncpy-power9 stpncpy-power9 strlen-power10
endif
CFLAGS-strncase-power7.c += -mcpu=power7 -funroll-loops
diff --git a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
index 30fd89e..2bb47d3 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/ifunc-impl-list.c
@@ -164,6 +164,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
/* Support sysdeps/powerpc/powerpc64/multiarch/strncmp.c. */
IFUNC_IMPL (i, name, strncmp,
#ifdef __LITTLE_ENDIAN__
+ IFUNC_IMPL_ADD (array, i, strncmp, hwcap2 & PPC_FEATURE2_ARCH_3_1
+ && hwcap & PPC_FEATURE_HAS_VSX,
+ __strncmp_power10)
IFUNC_IMPL_ADD (array, i, strncmp, hwcap2 & PPC_FEATURE2_ARCH_3_00
&& hwcap & PPC_FEATURE_HAS_ALTIVEC,
__strncmp_power9)
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp-power10.S b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power10.S
new file mode 100644
index 0000000..d7026c1
--- /dev/null
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncmp-power10.S
@@ -0,0 +1,25 @@
+/* Copyright (C) 2024 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, see
+ <https://www.gnu.org/licenses/>. */
+
+#if defined __LITTLE_ENDIAN__ && IS_IN (libc)
+#define STRNCMP __strncmp_power10
+
+#undef libc_hidden_builtin_def
+#define libc_hidden_builtin_def(name)
+
+#include <sysdeps/powerpc/powerpc64/le/power10/strncmp.S>
+#endif
diff --git a/sysdeps/powerpc/powerpc64/multiarch/strncmp.c b/sysdeps/powerpc/powerpc64/multiarch/strncmp.c
index 6178f4a..a5ed67f 100644
--- a/sysdeps/powerpc/powerpc64/multiarch/strncmp.c
+++ b/sysdeps/powerpc/powerpc64/multiarch/strncmp.c
@@ -29,6 +29,7 @@ extern __typeof (strncmp) __strncmp_ppc attribute_hidden;
extern __typeof (strncmp) __strncmp_power8 attribute_hidden;
# ifdef __LITTLE_ENDIAN__
extern __typeof (strncmp) __strncmp_power9 attribute_hidden;
+extern __typeof (strncmp) __strncmp_power10 attribute_hidden;
# endif
# undef strncmp
@@ -36,6 +37,9 @@ extern __typeof (strncmp) __strncmp_power9 attribute_hidden;
ifunc symbol properly. */
libc_ifunc_redirected (__redirect_strncmp, strncmp,
# ifdef __LITTLE_ENDIAN__
+ (hwcap2 & PPC_FEATURE2_ARCH_3_1
+ && hwcap & PPC_FEATURE_HAS_VSX)
+ ? __strncmp_power10 :
(hwcap2 & PPC_FEATURE2_ARCH_3_00
&& hwcap & PPC_FEATURE_HAS_ALTIVEC)
? __strncmp_power9 :
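The hwcap test used by the resolver above can be reproduced from user
code with getauxval; a small probe sketch follows. The PPC_FEATURE*
values are copied from the kernel's asm/cputable.h so the snippet builds
without powerpc-specific headers; treat them as an assumption to verify
against your own headers.

#include <stdio.h>
#include <sys/auxv.h>

#define PPC_FEATURE_HAS_VSX   0x00000080  /* From asm/cputable.h.  */
#define PPC_FEATURE2_ARCH_3_1 0x00040000  /* From asm/cputable.h.  */

int
main (void)
{
  unsigned long hwcap = getauxval (AT_HWCAP);
  unsigned long hwcap2 = getauxval (AT_HWCAP2);

  /* Same condition the ifunc resolver tests.  */
  if ((hwcap2 & PPC_FEATURE2_ARCH_3_1) && (hwcap & PPC_FEATURE_HAS_VSX))
    puts ("__strncmp_power10 would be selected");
  else
    puts ("an older strncmp variant would be selected");
  return 0;
}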