aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTamar Christina <tamar.christina@arm.com>2021-11-10 15:10:09 +0000
committerTamar Christina <tamar.christina@arm.com>2021-11-10 15:10:09 +0000
commit5ba247ade1cc0ca06a0f7d3483b0520ba98bf2d2 (patch)
treeaae776b0a50ce68a9c73d1de60a969a302d2d772
parent992644c3511acd58248db784f1ac43e2f053ebcc (diff)
downloadgcc-5ba247ade1cc0ca06a0f7d3483b0520ba98bf2d2.zip
gcc-5ba247ade1cc0ca06a0f7d3483b0520ba98bf2d2.tar.gz
gcc-5ba247ade1cc0ca06a0f7d3483b0520ba98bf2d2.tar.bz2
AArch64: Remove shuffle pattern for rounding variant.
This removes the patterns that optimized the rounding shift and narrow. The optimization is valid only for the truncating shift and narrow; for the rounding shift and narrow we need a different pattern that I will submit separately. This wasn't noticed before as the benchmarks did not run the conformance checks as part of the run, which we now do, and this now passes again. gcc/ChangeLog: * config/aarch64/aarch64-simd.md (*aarch64_topbits_shuffle<mode>_le ,*aarch64_topbits_shuffle<mode>_be): Remove. gcc/testsuite/ChangeLog: * gcc.target/aarch64/shrn-combine-8.c: Update. * gcc.target/aarch64/shrn-combine-9.c: Update.
-rw-r--r--gcc/config/aarch64/aarch64-simd.md32
-rw-r--r--gcc/testsuite/gcc.target/aarch64/shrn-combine-8.c2
-rw-r--r--gcc/testsuite/gcc.target/aarch64/shrn-combine-9.c2
3 files changed, 2 insertions, 34 deletions
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 35d55a3..54d7ca4 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1887,22 +1887,6 @@
[(set_attr "type" "neon_permute<q>")]
)
-(define_insn "*aarch64_topbits_shuffle<mode>_le"
- [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
- (vec_concat:<VNARROWQ2>
- (unspec:<VNARROWQ> [
- (match_operand:VQN 1 "register_operand" "w")
- (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_exact_top")
- ] UNSPEC_RSHRN)
- (unspec:<VNARROWQ> [
- (match_operand:VQN 3 "register_operand" "w")
- (match_dup 2)
- ] UNSPEC_RSHRN)))]
- "TARGET_SIMD && !BYTES_BIG_ENDIAN"
- "uzp2\\t%0.<V2ntype>, %1.<V2ntype>, %3.<V2ntype>"
- [(set_attr "type" "neon_permute<q>")]
-)
-
(define_insn "*aarch64_<srn_op>topbits_shuffle<mode>_be"
[(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
(vec_concat:<VNARROWQ2>
@@ -1917,22 +1901,6 @@
[(set_attr "type" "neon_permute<q>")]
)
-(define_insn "*aarch64_topbits_shuffle<mode>_be"
- [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
- (vec_concat:<VNARROWQ2>
- (unspec:<VNARROWQ> [
- (match_operand:VQN 3 "register_operand" "w")
- (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_exact_top")
- ] UNSPEC_RSHRN)
- (unspec:<VNARROWQ> [
- (match_operand:VQN 1 "register_operand" "w")
- (match_dup 2)
- ] UNSPEC_RSHRN)))]
- "TARGET_SIMD && BYTES_BIG_ENDIAN"
- "uzp2\\t%0.<V2ntype>, %1.<V2ntype>, %3.<V2ntype>"
- [(set_attr "type" "neon_permute<q>")]
-)
-
(define_expand "aarch64_shrn<mode>"
[(set (match_operand:<VNARROWQ> 0 "register_operand")
(truncate:<VNARROWQ>
diff --git a/gcc/testsuite/gcc.target/aarch64/shrn-combine-8.c b/gcc/testsuite/gcc.target/aarch64/shrn-combine-8.c
index 6a47f3c..c93c179 100644
--- a/gcc/testsuite/gcc.target/aarch64/shrn-combine-8.c
+++ b/gcc/testsuite/gcc.target/aarch64/shrn-combine-8.c
@@ -6,7 +6,7 @@
uint8x16_t foo (uint16x8_t a, uint16x8_t b)
{
- return vrshrn_high_n_u16 (vrshrn_n_u16 (a, 8), b, 8);
+ return vshrn_high_n_u16 (vshrn_n_u16 (a, 8), b, 8);
}
/* { dg-final { scan-assembler-times {\tuzp2\t} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/shrn-combine-9.c b/gcc/testsuite/gcc.target/aarch64/shrn-combine-9.c
index 929a55c..bdb3c13 100644
--- a/gcc/testsuite/gcc.target/aarch64/shrn-combine-9.c
+++ b/gcc/testsuite/gcc.target/aarch64/shrn-combine-9.c
@@ -6,7 +6,7 @@
uint16x8_t foo (uint32x4_t a, uint32x4_t b)
{
- return vrshrn_high_n_u32 (vrshrn_n_u32 (a, 16), b, 16);
+ return vshrn_high_n_u32 (vshrn_n_u32 (a, 16), b, 16);
}
/* { dg-final { scan-assembler-times {\tuzp2\t} 1 } } */