about summary refs log tree commit diff
diff options
context:
space:
mode:
authorKyrylo Tkachov <kyrylo.tkachov@arm.com>2023-06-20 11:03:47 +0100
committerKyrylo Tkachov <kyrylo.tkachov@arm.com>2023-06-20 11:03:47 +0100
commit63aaff9b3ab0fed154b2b5ec09f5a0e68c1e5ca7 (patch)
treed8518300cd7d4c711ac355f267f168b716054033
parent36de416df8b3f109353e309011061fa66e872e3a (diff)
downloadgcc-63aaff9b3ab0fed154b2b5ec09f5a0e68c1e5ca7.zip
gcc-63aaff9b3ab0fed154b2b5ec09f5a0e68c1e5ca7.tar.gz
gcc-63aaff9b3ab0fed154b2b5ec09f5a0e68c1e5ca7.tar.bz2
aarch64: Optimise ADDP with same source operands
We've been asked to optimise the testcase in this patch of a 64-bit ADDP with
the low and high halves of the same 128-bit vector. This can be done by a
single .4s ADDP followed by just reading the bottom 64 bits. A splitter for
this is quite straightforward now that all the vec_concat stuff is collapsed
by simplify-rtx.

With this patch we generate a single:
        addp    v0.4s, v0.4s, v0.4s
instead of:
        dup     d31, v0.d[1]
        addp    v0.2s, v0.2s, v31.2s
        ret

Bootstrapped and tested on aarch64-none-linux-gnu and aarch64_be-none-elf.

gcc/ChangeLog:

        * config/aarch64/aarch64-simd.md (*aarch64_addp_same_reg<mode>):
        New define_insn_and_split.

gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/simd/addp-same-low_1.c: New test.
-rw-r--r-- gcc/config/aarch64/aarch64-simd.md                      | 30
-rw-r--r-- gcc/testsuite/gcc.target/aarch64/simd/addp-same-low_1.c | 20
2 files changed, 50 insertions, 0 deletions
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index cd04cbd..90118c6 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -7327,6 +7327,36 @@
[(set_attr "type" "neon_reduc_add<q>")]
)
+;; A common usecase of 64-bit ADDP is to have both operands come from the same
+;; 128-bit vector and produce the pairwise addition results in the lower half.
+;; Split into the 128-bit ADDP form and extract the low half.
+(define_insn_and_split "*aarch64_addp_same_reg<mode>"
+ [(set (match_operand:<VHALF> 0 "register_operand" "=w")
+ (plus:<VHALF>
+ (vec_select:<VHALF>
+ (match_operand:VQ_I 1 "register_operand" "w")
+ (match_operand:VQ_I 2 "vect_par_cnst_even_or_odd_half"))
+ (vec_select:<VHALF>
+ (match_dup 1)
+ (match_operand:VQ_I 3 "vect_par_cnst_even_or_odd_half"))))]
+ "TARGET_SIMD && !rtx_equal_p (operands[2], operands[3])"
+ "#"
+ "&& 1"
+ [(const_int 0)]
+ {
+ rtx scratch;
+ if (can_create_pseudo_p ())
+ scratch = gen_reg_rtx (<MODE>mode);
+ else
+ scratch = lowpart_subreg (<MODE>mode, operands[0], <VHALF>mode);
+
+ emit_insn (gen_aarch64_addp<mode>_insn (scratch, operands[1], operands[1],
+ operands[2], operands[3]));
+ emit_move_insn (operands[0], gen_lowpart (<VHALF>mode, scratch));
+ DONE;
+ }
+)
+
(define_expand "aarch64_addp<mode>"
[(match_operand:VDQ_I 0 "register_operand")
(match_operand:VDQ_I 1 "register_operand")
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/addp-same-low_1.c b/gcc/testsuite/gcc.target/aarch64/simd/addp-same-low_1.c
new file mode 100644
index 0000000..42d8db7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/addp-same-low_1.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O" } */
+/* { dg-require-effective-target aarch64_little_endian } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+
+/*
+** foo:
+** addp v0\.4s, v0\.4s, v0\.4s
+** ret
+*/
+
+int32x2_t
+foo (int32x4_t a)
+{
+ return vpadd_s32 (vget_low_s32(a), vget_high_s32(a));
+}
+