author     Jonathan Wright <jonathan.wright@arm.com>   2021-02-08 21:23:48 +0000
committer  Jonathan Wright <jonathan.wright@arm.com>   2021-04-28 21:11:13 +0100
commit     fa18085a32df06be6e7d899fd804d537c0149baf (patch)
tree       536f7c6f7f11e9ccb3eabe3383dc32585780bcb4
parent     eb2b36024c94bc32465777927092cdbdf2d95204 (diff)
download   gcc-fa18085a32df06be6e7d899fd804d537c0149baf.zip
           gcc-fa18085a32df06be6e7d899fd804d537c0149baf.tar.gz
           gcc-fa18085a32df06be6e7d899fd804d537c0149baf.tar.bz2
aarch64: Use RTL builtins for [su]paddl[q] intrinsics
Rewrite [su]paddl[q] Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and optimization.

gcc/ChangeLog:

2021-02-08  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add [su]addlp
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_<su>addlp<mode>):
	Define.
	* config/aarch64/arm_neon.h (vpaddl_s8): Use RTL builtin
	instead of inline asm.
	(vpaddl_s16): Likewise.
	(vpaddl_s32): Likewise.
	(vpaddl_u8): Likewise.
	(vpaddl_u16): Likewise.
	(vpaddl_u32): Likewise.
	(vpaddlq_s8): Likewise.
	(vpaddlq_s16): Likewise.
	(vpaddlq_s32): Likewise.
	(vpaddlq_u8): Likewise.
	(vpaddlq_u16): Likewise.
	(vpaddlq_u32): Likewise.
	* config/aarch64/iterators.md: Define [SU]ADDLP unspecs with
	appropriate attributes.
 gcc/config/aarch64/aarch64-simd-builtins.def |  4
 gcc/config/aarch64/aarch64-simd.md           |  9
 gcc/config/aarch64/arm_neon.h                | 84
 gcc/config/aarch64/iterators.md              |  6
 4 files changed, 31 insertions(+), 72 deletions(-)
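
Before diving into the diff, it may help to recall what these intrinsics
compute: each pairwise-adds adjacent lanes of the input vector and widens
the sums to twice the element width. A minimal sketch of the semantics
(values are illustrative; GCC on an AArch64 target is assumed):

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  /* Pairwise add-long: adjacent 8-bit lanes are summed into 16-bit
     lanes, so { 1, 2, 3, 4, 5, 6, 7, 8 } -> { 3, 7, 11, 15 }.  */
  int8x8_t a = { 1, 2, 3, 4, 5, 6, 7, 8 };
  int16x4_t r = vpaddl_s8 (a);
  printf ("%d %d %d %d\n",
	  vget_lane_s16 (r, 0), vget_lane_s16 (r, 1),
	  vget_lane_s16 (r, 2), vget_lane_s16 (r, 3));
  return 0;
}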
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 92804e0..ecf8019 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -154,6 +154,10 @@
BUILTIN_VDQ_BHSI (BINOP, srhadd, 0, NONE)
BUILTIN_VDQ_BHSI (BINOP, urhadd, 0, NONE)
+ /* Implemented by aarch64_<su>addlp<mode>. */
+ BUILTIN_VDQV_L (UNOP, saddlp, 0, NONE)
+ BUILTIN_VDQV_L (UNOPU, uaddlp, 0, NONE)
+
/* Implemented by aarch64_<su>addlv<mode>. */
BUILTIN_VDQV_L (UNOP, saddlv, 0, NONE)
BUILTIN_VDQV_L (UNOPU, uaddlv, 0, NONE)
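
Each BUILTIN_VDQV_L line expands to one builtin per mode covered by the
VDQV_L iterator, and the resulting names are exactly the
__builtin_aarch64_[su]addlp* calls used in the arm_neon.h hunk below. A
hedged sketch of calling them directly (ordinarily user code should go
through arm_neon.h instead; function names here are illustrative):

#include <arm_neon.h>

/* Equivalent to vpaddl_s8 after this patch.  */
int16x4_t
saddlp_demo (int8x8_t a)
{
  return __builtin_aarch64_saddlpv8qi (a);
}

/* The UNOPU variants carry a _uu suffix (unsigned result, unsigned
   operand) and correspond to vpaddl_u8.  */
uint16x4_t
uaddlp_demo (uint8x8_t a)
{
  return __builtin_aarch64_uaddlpv8qi_uu (a);
}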
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 60e11c6..8aae6a6 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3149,6 +3149,15 @@
[(set_attr "type" "neon_reduc_add<q>")]
)
+(define_insn "aarch64_<su>addlp<mode>"
+ [(set (match_operand:<VDBLW> 0 "register_operand" "=w")
+ (unspec:<VDBLW> [(match_operand:VDQV_L 1 "register_operand" "w")]
+ USADDLP))]
+ "TARGET_SIMD"
+ "<su>addlp\\t%0.<Vwhalf>, %1.<Vtype>"
+ [(set_attr "type" "neon_reduc_add<q>")]
+)
+
;; ADDV with result zero-extended to SI/DImode (for popcount).
(define_insn "aarch64_zero_extend<GPI:mode>_reduc_plus_<VDQV_E:mode>"
[(set (match_operand:GPI 0 "register_operand" "=w")
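
The new define_insn is instantiated twice: the USADDLP int iterator
supplies UNSPEC_SADDLP and UNSPEC_UADDLP, the <su> attribute picks the
"s"/"u" mnemonic prefix, and <VDBLW> maps each VDQV_L input mode to its
double-width, half-lane-count result mode. Assuming an AArch64 target, a
function like the following should now compile down to a single
instruction (register choice is up to the allocator):

#include <arm_neon.h>

int32x4_t
pairwise_widen (int16x8_t a)
{
  return vpaddlq_s16 (a);  /* Expected codegen: saddlp v0.4s, v0.8h */
}

Unlike the old asm block, the builtin gives the compiler a real
instruction it can schedule and whose operands the register allocator can
choose freely.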
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 52f3714..7eed6c6 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8529,144 +8529,84 @@ __extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s8 (int8x8_t __a)
{
- int16x4_t __result;
- __asm__ ("saddlp %0.4h,%1.8b"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_saddlpv8qi (__a);
}
__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s16 (int16x4_t __a)
{
- int32x2_t __result;
- __asm__ ("saddlp %0.2s,%1.4h"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_saddlpv4hi (__a);
}
__extension__ extern __inline int64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_s32 (int32x2_t __a)
{
- int64x1_t __result;
- __asm__ ("saddlp %0.1d,%1.2s"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return (int64x1_t) __builtin_aarch64_saddlpv2si (__a);
}
__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u8 (uint8x8_t __a)
{
- uint16x4_t __result;
- __asm__ ("uaddlp %0.4h,%1.8b"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_uaddlpv8qi_uu (__a);
}
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u16 (uint16x4_t __a)
{
- uint32x2_t __result;
- __asm__ ("uaddlp %0.2s,%1.4h"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_uaddlpv4hi_uu (__a);
}
__extension__ extern __inline uint64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddl_u32 (uint32x2_t __a)
{
- uint64x1_t __result;
- __asm__ ("uaddlp %0.1d,%1.2s"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return (uint64x1_t) __builtin_aarch64_uaddlpv2si_uu (__a);
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s8 (int8x16_t __a)
{
- int16x8_t __result;
- __asm__ ("saddlp %0.8h,%1.16b"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_saddlpv16qi (__a);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s16 (int16x8_t __a)
{
- int32x4_t __result;
- __asm__ ("saddlp %0.4s,%1.8h"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_saddlpv8hi (__a);
}
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_s32 (int32x4_t __a)
{
- int64x2_t __result;
- __asm__ ("saddlp %0.2d,%1.4s"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_saddlpv4si (__a);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u8 (uint8x16_t __a)
{
- uint16x8_t __result;
- __asm__ ("uaddlp %0.8h,%1.16b"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_uaddlpv16qi_uu (__a);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u16 (uint16x8_t __a)
{
- uint32x4_t __result;
- __asm__ ("uaddlp %0.4s,%1.8h"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_uaddlpv8hi_uu (__a);
}
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vpaddlq_u32 (uint32x4_t __a)
{
- uint64x2_t __result;
- __asm__ ("uaddlp %0.2d,%1.4s"
- : "=w"(__result)
- : "w"(__a)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_uaddlpv4si_uu (__a);
}
__extension__ extern __inline int8x16_t
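
Since the rewritten intrinsics must be drop-in replacements for the old
asm blocks, a quick functional check is straightforward (values are
illustrative; AArch64 target assumed):

#include <arm_neon.h>
#include <assert.h>

int
main (void)
{
  uint8x8_t a = { 10, 20, 30, 40, 50, 60, 70, 80 };
  uint16x4_t r = vpaddl_u8 (a);

  /* Adjacent pairs: 10+20, 30+40, 50+60, 70+80.  */
  assert (vget_lane_u16 (r, 0) == 30);
  assert (vget_lane_u16 (r, 1) == 70);
  assert (vget_lane_u16 (r, 2) == 110);
  assert (vget_lane_u16 (r, 3) == 150);
  return 0;
}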
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index a3d895a..8a765ea 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -550,6 +550,8 @@
UNSPEC_SSHLL ; Used in aarch64-simd.md.
UNSPEC_USHLL ; Used in aarch64-simd.md.
UNSPEC_ADDP ; Used in aarch64-simd.md.
+ UNSPEC_SADDLP ; Used in aarch64-simd.md.
+ UNSPEC_UADDLP ; Used in aarch64-simd.md.
UNSPEC_TBL ; Used in vector permute patterns.
UNSPEC_TBX ; Used in vector permute patterns.
UNSPEC_CONCAT ; Used in vector permute patterns.
@@ -2209,6 +2211,8 @@
(define_int_iterator SVE_INT_ADDV [UNSPEC_SADDV UNSPEC_UADDV])
+(define_int_iterator USADDLP [UNSPEC_SADDLP UNSPEC_UADDLP])
+
(define_int_iterator USADDLV [UNSPEC_SADDLV UNSPEC_UADDLV])
(define_int_iterator LOGICALF [UNSPEC_ANDF UNSPEC_IORF UNSPEC_XORF])
@@ -2961,6 +2965,8 @@
;; "s" for signed operations and "u" for unsigned ones.
(define_int_attr su [(UNSPEC_SADDV "s")
(UNSPEC_UADDV "u")
+ (UNSPEC_SADDLP "s")
+ (UNSPEC_UADDLP "u")
(UNSPEC_SADDLV "s")
(UNSPEC_UADDLV "u")
(UNSPEC_UNPACKSHI "s")