author    | Kyrylo Tkachov <kyrylo.tkachov@arm.com> | 2023-04-24 09:28:35 +0100
committer | Kyrylo Tkachov <kyrylo.tkachov@arm.com> | 2023-04-24 09:28:35 +0100
commit    | 6ec565d8755afe1c187cda69fb8e478e669cfd02 (patch)
tree      | a12071cd03cd6f7ee7d6aeab492cc198126340f4
parent    | 60bf26a412a9ec2b467c04fac1dfacef2ef09c6d (diff)
aarch64: Add pattern to match zero-extending scalar result of ADDLV
The vaddlv_u8 and vaddlv_u16 intrinsics produce a widened scalar result (uint16_t and uint32_t respectively).
The ADDLV instructions themselves zero the rest of the V register, which gives us a free zero-extension
to 32 and 64 bits, similar to how it works on the GP reg side.
Because we don't model that zero-extension in the machine description, GCC may move the
results of these instructions to the GP regs just to perform a (superfluous) zero-extension.
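For instance, storing the widened result of vaddlv_u8 through a uint32_t pointer is enough to trigger the problem; this is the foo_8_32 function from the new test added below:

#include <arm_neon.h>

void
foo_8_32 (uint8x8_t a, uint32_t *res)
{
  /* vaddlv_u8 returns uint16_t; storing it through a uint32_t pointer
     requires a zero-extension to 32 bits.  */
  *res = vaddlv_u8 (a);
}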
This patch adds a pattern to catch these cases. For the testcases we now generate no zero-extends
or GP<->FP reg moves, whereas before we generated code like:
foo_8_32:
uaddlv h0, v0.8b
umov w1, v0.h[0] // FP<->GP move with zero-extension!
str w1, [x0]
ret
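With the new pattern the result stays in the SIMD register file and, as the new test expects, the same function compiles to:
foo_8_32:
uaddlv h0, v0.8b
str s0, [x0]
ret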
Bootstrapped and tested on aarch64-none-linux-gnu.
gcc/ChangeLog:
* config/aarch64/aarch64-simd.md
(*aarch64_<su>addlv<VDQV_L:mode>_ze<GPI:mode>): New pattern.
gcc/testsuite/ChangeLog:
* gcc.target/aarch64/simd/addlv_zext.c: New test.
-rw-r--r-- | gcc/config/aarch64/aarch64-simd.md | 16
-rw-r--r-- | gcc/testsuite/gcc.target/aarch64/simd/addlv_zext.c | 84
2 files changed, 100 insertions, 0 deletions
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 7bd4362..d1e74a6 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3521,6 +3521,22 @@
   [(set_attr "type" "neon_reduc_add<q>")]
 )
 
+;; Zero-extending version of the above. As these intrinsics produce a scalar
+;; value that may be used by further intrinsics we want to avoid moving the
+;; result into GP regs to do a zero-extension that ADDLV/ADDLP gives for free.
+
+(define_insn "*aarch64_<su>addlv<VDQV_L:mode>_ze<GPI:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=w")
+	(zero_extend:GPI
+	  (unspec:<VWIDE_S>
+	    [(match_operand:VDQV_L 1 "register_operand" "w")]
+	    USADDLV)))]
+  "TARGET_SIMD
+   && (GET_MODE_SIZE (<GPI:MODE>mode) > GET_MODE_SIZE (<VWIDE_S>mode))"
+  "<su>addl<VDQV_L:vp>\\t%<VDQV_L:Vwstype>0<VDQV_L:Vwsuf>, %1.<VDQV_L:Vtype>"
+  [(set_attr "type" "neon_reduc_add<VDQV_L:q>")]
+)
+
 (define_insn "aarch64_<su>addlp<mode>"
   [(set (match_operand:<VDBLW> 0 "register_operand" "=w")
 	(unspec:<VDBLW> [(match_operand:VDQV_L 1 "register_operand" "w")]
diff --git a/gcc/testsuite/gcc.target/aarch64/simd/addlv_zext.c b/gcc/testsuite/gcc.target/aarch64/simd/addlv_zext.c
new file mode 100644
index 0000000..1bd3c30
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/simd/addlv_zext.c
@@ -0,0 +1,84 @@
+/* { dg-do compile } */
+/* { dg-additional-options "--save-temps -O1" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** foo_8_32:
+**	uaddlv	h0, v0.8b
+**	str	s0, \[x0\]
+**	ret
+*/
+
+void
+foo_8_32 (uint8x8_t a, uint32_t *res)
+{
+  *res = vaddlv_u8 (a);
+}
+
+/*
+** foo_8_64:
+**	uaddlv	h0, v0.8b
+**	str	d0, \[x0\]
+**	ret
+*/
+
+void
+foo_8_64 (uint8x8_t a, uint64_t *res)
+{
+  *res = vaddlv_u8 (a);
+}
+
+/*
+** foo_16_64:
+**	uaddlv	s0, v0.4h
+**	str	d0, \[x0\]
+**	ret
+*/
+
+void
+foo_16_64 (uint16x4_t a, uint64_t *res)
+{
+  *res = vaddlv_u16 (a);
+}
+
+/*
+** fooq_8_32:
+**	uaddlv	h0, v0.16b
+**	str	s0, \[x0\]
+**	ret
+*/
+
+void
+fooq_8_32 (uint8x16_t a, uint32_t *res)
+{
+  *res = vaddlvq_u8 (a);
+}
+
+/*
+** fooq_8_64:
+**	uaddlv	h0, v0.16b
+**	str	d0, \[x0\]
+**	ret
+*/
+
+void
+fooq_8_64 (uint8x16_t a, uint64_t *res)
+{
+  *res = vaddlvq_u8 (a);
+}
+
+/*
+** fooq_16_64:
+**	uaddlv	s0, v0.8h
+**	str	d0, \[x0\]
+**	ret
+*/
+
+void
+fooq_16_64 (uint16x8_t a, uint64_t *res)
+{
+  *res = vaddlvq_u16 (a);
+}
+