author    Haochen Gui <guihaoc@gcc.gnu.org>    2023-12-27 10:30:06 +0800
committer Haochen Gui <guihaoc@gcc.gnu.org>    2023-12-27 10:35:13 +0800
commit    78bd9e25601a2b7162ad71f714861a05cf994488 (patch)
tree      74c84fe92064d391dce0ae03f902f78e9592bba1
parent    f2d47aa70ee4e72f7095a9f166cb45fc043f75cc (diff)
rs6000: Correct definition of macro of fixed point efficient unaligned
Macro TARGET_EFFICIENT_OVERLAPPING_UNALIGNED is used in rs6000-string.cc to
guard platforms on which fixed-point unaligned loads/stores are efficient.
It was originally defined as TARGET_EFFICIENT_UNALIGNED_VSX, which is enabled
from P8 onwards and can be disabled by the -mno-vsx option, so the definition
is improper.  This patch removes the macro and instead calls
targetm.slow_unaligned_access to judge whether fixed-point unaligned
loads/stores are efficient.

gcc/
	* config/rs6000/rs6000.h (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED):
	Remove.
	* config/rs6000/rs6000-string.cc (select_block_compare_mode):
	Replace TARGET_EFFICIENT_OVERLAPPING_UNALIGNED with
	targetm.slow_unaligned_access.
	(expand_block_compare_gpr): Likewise.
	(expand_block_compare): Likewise.
	(expand_strncmp_gpr_sequence): Likewise.

gcc/testsuite/
	* gcc.target/powerpc/block-cmp-1.c: New.
	* gcc.target/powerpc/block-cmp-2.c: New.
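For context, a minimal sketch (not part of the patch) of how the guard changes
in the rs6000-string.cc helpers; load_mode and align are the variables those
helpers already use, and use_overlapping_load is a hypothetical stand-in for
the guarded action:

  /* Before: the overlap trick was gated on VSX, so -mno-vsx wrongly
     disabled it even though only fixed-point (GPR) loads are involved.  */
  if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
    use_overlapping_load ();   /* hypothetical helper */

  /* After: ask the target hook about the actual load mode; align is in
     bytes in these helpers, while the hook expects an alignment in bits.  */
  if (!targetm.slow_unaligned_access (load_mode, align * BITS_PER_UNIT))
    use_overlapping_load ();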
-rw-r--r--  gcc/config/rs6000/rs6000-string.cc              15
-rw-r--r--  gcc/config/rs6000/rs6000.h                       4
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/block-cmp-1.c  11
-rw-r--r--  gcc/testsuite/gcc.target/powerpc/block-cmp-2.c  12
4 files changed, 32 insertions, 10 deletions
diff --git a/gcc/config/rs6000/rs6000-string.cc b/gcc/config/rs6000/rs6000-string.cc
index 44a946c..fa80298 100644
--- a/gcc/config/rs6000/rs6000-string.cc
+++ b/gcc/config/rs6000/rs6000-string.cc
@@ -305,7 +305,7 @@ select_block_compare_mode (unsigned HOST_WIDE_INT offset,
else if (bytes == GET_MODE_SIZE (QImode))
return QImode;
else if (bytes < GET_MODE_SIZE (SImode)
- && TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+ && !targetm.slow_unaligned_access (SImode, align * BITS_PER_UNIT)
&& offset >= GET_MODE_SIZE (SImode) - bytes)
/* This matches the case were we have SImode and 3 bytes
and offset >= 1 and permits us to move back one and overlap
@@ -313,7 +313,7 @@ select_block_compare_mode (unsigned HOST_WIDE_INT offset,
unwanted bytes off of the input. */
return SImode;
else if (word_mode_ok && bytes < UNITS_PER_WORD
- && TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+ && !targetm.slow_unaligned_access (word_mode, align * BITS_PER_UNIT)
&& offset >= UNITS_PER_WORD-bytes)
/* Similarly, if we can use DImode it will get matched here and
can do an overlapping read that ends at the end of the block. */
@@ -1749,7 +1749,8 @@ expand_block_compare_gpr(unsigned HOST_WIDE_INT bytes, unsigned int base_align,
load_mode_size = GET_MODE_SIZE (load_mode);
if (bytes >= load_mode_size)
cmp_bytes = load_mode_size;
- else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
+ else if (!targetm.slow_unaligned_access (load_mode,
+ align * BITS_PER_UNIT))
{
/* Move this load back so it doesn't go past the end.
P8/P9 can do this efficiently. */
@@ -1987,7 +1988,8 @@ expand_block_compare (rtx operands[])
if (!CONST_INT_P (align_rtx))
return false;
- unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;
+ unsigned int align_by_bits = UINTVAL (align_rtx);
+ unsigned int base_align = align_by_bits / BITS_PER_UNIT;
gcc_assert (GET_MODE (target) == SImode);
@@ -2026,7 +2028,7 @@ expand_block_compare (rtx operands[])
/* The code generated for p7 and older is not faster than glibc
memcmp if alignment is small and length is not short, so bail
out to avoid those conditions. */
- if (!TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+ if (targetm.slow_unaligned_access (word_mode, align_by_bits)
&& ((base_align == 1 && bytes > 16)
|| (base_align == 2 && bytes > 32)))
return false;
@@ -2168,7 +2170,8 @@ expand_strncmp_gpr_sequence (unsigned HOST_WIDE_INT bytes_to_compare,
load_mode_size = GET_MODE_SIZE (load_mode);
if (bytes_to_compare >= load_mode_size)
cmp_bytes = load_mode_size;
- else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
+ else if (!targetm.slow_unaligned_access (load_mode,
+ align * BITS_PER_UNIT))
{
/* Move this load back so it doesn't go past the end.
P8/P9 can do this efficiently. */
diff --git a/gcc/config/rs6000/rs6000.h b/gcc/config/rs6000/rs6000.h
index 98e046f..00a8f3d 100644
--- a/gcc/config/rs6000/rs6000.h
+++ b/gcc/config/rs6000/rs6000.h
@@ -485,10 +485,6 @@ extern int rs6000_vector_align[];
#define TARGET_NO_SF_SUBREG TARGET_DIRECT_MOVE_64BIT
#define TARGET_ALLOW_SF_SUBREG (!TARGET_DIRECT_MOVE_64BIT)
-/* This wants to be set for p8 and newer. On p7, overlapping unaligned
- loads are slow. */
-#define TARGET_EFFICIENT_OVERLAPPING_UNALIGNED TARGET_EFFICIENT_UNALIGNED_VSX
-
/* Byte/char syncs were added as phased in for ISA 2.06B, but are not present
in power7, so conditionalize them on p8 features. TImode syncs need quad
memory support. */
diff --git a/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c b/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c
new file mode 100644
index 0000000..bcf0cb2
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/block-cmp-1.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -mdejagnu-cpu=power8 -mno-vsx" } */
+/* { dg-final { scan-assembler-not {\mb[l]? memcmp\M} } } */
+
+/* Test that memcmpsi is still expanded inline instead of calling the library
+   on P8 with VSX disabled.  */
+
+int foo (const char* s1, const char* s2)
+{
+ return __builtin_memcmp (s1, s2, 20);
+}
diff --git a/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c b/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c
new file mode 100644
index 0000000..dfee15b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/block-cmp-2.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target opt_mstrict_align } */
+/* { dg-options "-O2 -mstrict-align" } */
+/* { dg-final { scan-assembler-times {\mb[l]? memcmp\M} 1 } } */
+
+/* Test that the library is called for block memory compares when strict-align
+   is set.  The flag causes rs6000_slow_unaligned_access to return true.  */
+
+int foo (const char* s1, const char* s2)
+{
+ return __builtin_memcmp (s1, s2, 20);
+}