author     Jonathan Wright <jonathan.wright@arm.com>  2021-11-09 11:27:03 +0000
committer  Jonathan Wright <jonathan.wright@arm.com>  2021-11-11 15:34:50 +0000
commit     3e35924cf19b7e45b7fa6f294b8122ba12a8725c (patch)
tree       8f6152605d7d540bc009838c30a9cb5b75cd725e /gcc
parent     ee03bed0b05ad77010719b1c7f196a7bb26b8d6f (diff)
aarch64: Use type-qualified builtins for U[R]HADD Neon intrinsics
Declare unsigned type-qualified builtins and use them to implement
(rounding) halving-add Neon intrinsics. This removes the need for
many casts in arm_neon.h.
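
For reference, the halving-add intrinsics compute (a + b) >> 1 per lane in a wider intermediate precision, so the addition cannot overflow, and the rounding variants add one before the shift. A minimal usage sketch follows (a standalone test program written for illustration, not part of this patch; the lane values are arbitrary examples):

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  uint8x8_t a = vdup_n_u8 (200);
  uint8x8_t b = vdup_n_u8 (101);

  /* UHADD: truncating halving add, (a + b) >> 1 per lane.  */
  uint8x8_t h = vhadd_u8 (a, b);   /* each lane: (200 + 101) >> 1 = 150 */

  /* URHADD: rounding halving add, (a + b + 1) >> 1 per lane.  */
  uint8x8_t r = vrhadd_u8 (a, b);  /* each lane: (200 + 101 + 1) >> 1 = 151 */

  printf ("%u %u\n", vget_lane_u8 (h, 0), vget_lane_u8 (r, 0));
  return 0;
}

With the type-qualified (BINOPU) builtins, these unsigned intrinsics pass their uint vector arguments straight through to the builtin instead of casting to and from the signed vector types.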
gcc/ChangeLog:
2021-11-09 Jonathan Wright <jonathan.wright@arm.com>
* config/aarch64/aarch64-simd-builtins.def: Use BINOPU type
qualifiers in generator macros for u[r]hadd builtins.
* config/aarch64/arm_neon.h (vhadd_s8): Remove unnecessary
cast.
(vhadd_s16): Likewise.
(vhadd_s32): Likewise.
(vhadd_u8): Use type-qualified builtin and remove casts.
(vhadd_u16): Likewise.
(vhadd_u32): Likewise.
(vhaddq_s8): Remove unnecessary cast.
(vhaddq_s16): Likewise.
(vhaddq_s32): Likewise.
(vhaddq_u8): Use type-qualified builtin and remove casts.
(vhaddq_u16): Likewise.
(vhaddq_u32): Likewise.
(vrhadd_s8): Remove unnecessary cast.
(vrhadd_s16): Likewise.
(vrhadd_s32): Likewise.
(vrhadd_u8): Use type-qualified builtin and remove casts.
(vrhadd_u16): Likewise.
(vrhadd_u32): Likewise.
(vrhaddq_s8): Remove unnecessary cast.
(vrhaddq_s16): Likewise.
(vrhaddq_s32): Likewise.
(vrhaddq_u8): Use type-qualified builtin and remove casts.
(vrhaddq_u16): Likewise.
(vrhaddq_u32): Likewise.
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/aarch64/aarch64-simd-builtins.def |  4
-rw-r--r--  gcc/config/aarch64/arm_neon.h                | 60
2 files changed, 26 insertions, 38 deletions
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 463c2b5..e21ca76 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -178,10 +178,10 @@
   /* Implemented by aarch64_<sur>h<addsub><mode>. */
   BUILTIN_VDQ_BHSI (BINOP, shadd, 0, NONE)
   BUILTIN_VDQ_BHSI (BINOP, shsub, 0, NONE)
-  BUILTIN_VDQ_BHSI (BINOP, uhadd, 0, NONE)
+  BUILTIN_VDQ_BHSI (BINOPU, uhadd, 0, NONE)
   BUILTIN_VDQ_BHSI (BINOP, uhsub, 0, NONE)
   BUILTIN_VDQ_BHSI (BINOP, srhadd, 0, NONE)
-  BUILTIN_VDQ_BHSI (BINOP, urhadd, 0, NONE)
+  BUILTIN_VDQ_BHSI (BINOPU, urhadd, 0, NONE)
 
   /* Implemented by aarch64_<su>addlp<mode>. */
   BUILTIN_VDQV_L (UNOP, saddlp, 0, NONE)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index c1bf748..0848b01 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -545,180 +545,168 @@ __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhadd_s8 (int8x8_t __a, int8x8_t __b)
 {
-  return (int8x8_t) __builtin_aarch64_shaddv8qi (__a, __b);
+  return __builtin_aarch64_shaddv8qi (__a, __b);
 }
 
 __extension__ extern __inline int16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhadd_s16 (int16x4_t __a, int16x4_t __b)
 {
-  return (int16x4_t) __builtin_aarch64_shaddv4hi (__a, __b);
+  return __builtin_aarch64_shaddv4hi (__a, __b);
 }
 
 __extension__ extern __inline int32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhadd_s32 (int32x2_t __a, int32x2_t __b)
 {
-  return (int32x2_t) __builtin_aarch64_shaddv2si (__a, __b);
+  return __builtin_aarch64_shaddv2si (__a, __b);
 }
 
 __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhadd_u8 (uint8x8_t __a, uint8x8_t __b)
 {
-  return (uint8x8_t) __builtin_aarch64_uhaddv8qi ((int8x8_t) __a,
-                                                  (int8x8_t) __b);
+  return __builtin_aarch64_uhaddv8qi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhadd_u16 (uint16x4_t __a, uint16x4_t __b)
 {
-  return (uint16x4_t) __builtin_aarch64_uhaddv4hi ((int16x4_t) __a,
-                                                   (int16x4_t) __b);
+  return __builtin_aarch64_uhaddv4hi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhadd_u32 (uint32x2_t __a, uint32x2_t __b)
 {
-  return (uint32x2_t) __builtin_aarch64_uhaddv2si ((int32x2_t) __a,
-                                                   (int32x2_t) __b);
+  return __builtin_aarch64_uhaddv2si_uuu (__a, __b);
 }
 
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhaddq_s8 (int8x16_t __a, int8x16_t __b)
 {
-  return (int8x16_t) __builtin_aarch64_shaddv16qi (__a, __b);
+  return __builtin_aarch64_shaddv16qi (__a, __b);
 }
 
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhaddq_s16 (int16x8_t __a, int16x8_t __b)
 {
-  return (int16x8_t) __builtin_aarch64_shaddv8hi (__a, __b);
+  return __builtin_aarch64_shaddv8hi (__a, __b);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhaddq_s32 (int32x4_t __a, int32x4_t __b)
 {
-  return (int32x4_t) __builtin_aarch64_shaddv4si (__a, __b);
+  return __builtin_aarch64_shaddv4si (__a, __b);
 }
 
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
 {
-  return (uint8x16_t) __builtin_aarch64_uhaddv16qi ((int8x16_t) __a,
-                                                    (int8x16_t) __b);
+  return __builtin_aarch64_uhaddv16qi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
 {
-  return (uint16x8_t) __builtin_aarch64_uhaddv8hi ((int16x8_t) __a,
-                                                   (int16x8_t) __b);
+  return __builtin_aarch64_uhaddv8hi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
 {
-  return (uint32x4_t) __builtin_aarch64_uhaddv4si ((int32x4_t) __a,
-                                                   (int32x4_t) __b);
+  return __builtin_aarch64_uhaddv4si_uuu (__a, __b);
 }
 
 __extension__ extern __inline int8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhadd_s8 (int8x8_t __a, int8x8_t __b)
 {
-  return (int8x8_t) __builtin_aarch64_srhaddv8qi (__a, __b);
+  return __builtin_aarch64_srhaddv8qi (__a, __b);
 }
 
 __extension__ extern __inline int16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhadd_s16 (int16x4_t __a, int16x4_t __b)
 {
-  return (int16x4_t) __builtin_aarch64_srhaddv4hi (__a, __b);
+  return __builtin_aarch64_srhaddv4hi (__a, __b);
 }
 
 __extension__ extern __inline int32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhadd_s32 (int32x2_t __a, int32x2_t __b)
 {
-  return (int32x2_t) __builtin_aarch64_srhaddv2si (__a, __b);
+  return __builtin_aarch64_srhaddv2si (__a, __b);
 }
 
 __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhadd_u8 (uint8x8_t __a, uint8x8_t __b)
 {
-  return (uint8x8_t) __builtin_aarch64_urhaddv8qi ((int8x8_t) __a,
-                                                   (int8x8_t) __b);
+  return __builtin_aarch64_urhaddv8qi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhadd_u16 (uint16x4_t __a, uint16x4_t __b)
 {
-  return (uint16x4_t) __builtin_aarch64_urhaddv4hi ((int16x4_t) __a,
-                                                    (int16x4_t) __b);
+  return __builtin_aarch64_urhaddv4hi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhadd_u32 (uint32x2_t __a, uint32x2_t __b)
 {
-  return (uint32x2_t) __builtin_aarch64_urhaddv2si ((int32x2_t) __a,
-                                                    (int32x2_t) __b);
+  return __builtin_aarch64_urhaddv2si_uuu (__a, __b);
 }
 
 __extension__ extern __inline int8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhaddq_s8 (int8x16_t __a, int8x16_t __b)
 {
-  return (int8x16_t) __builtin_aarch64_srhaddv16qi (__a, __b);
+  return __builtin_aarch64_srhaddv16qi (__a, __b);
 }
 
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhaddq_s16 (int16x8_t __a, int16x8_t __b)
 {
-  return (int16x8_t) __builtin_aarch64_srhaddv8hi (__a, __b);
+  return __builtin_aarch64_srhaddv8hi (__a, __b);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhaddq_s32 (int32x4_t __a, int32x4_t __b)
 {
-  return (int32x4_t) __builtin_aarch64_srhaddv4si (__a, __b);
+  return __builtin_aarch64_srhaddv4si (__a, __b);
 }
 
 __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhaddq_u8 (uint8x16_t __a, uint8x16_t __b)
 {
-  return (uint8x16_t) __builtin_aarch64_urhaddv16qi ((int8x16_t) __a,
-                                                     (int8x16_t) __b);
+  return __builtin_aarch64_urhaddv16qi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhaddq_u16 (uint16x8_t __a, uint16x8_t __b)
 {
-  return (uint16x8_t) __builtin_aarch64_urhaddv8hi ((int16x8_t) __a,
-                                                    (int16x8_t) __b);
+  return __builtin_aarch64_urhaddv8hi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vrhaddq_u32 (uint32x4_t __a, uint32x4_t __b)
 {
-  return (uint32x4_t) __builtin_aarch64_urhaddv4si ((int32x4_t) __a,
-                                                    (int32x4_t) __b);
+  return __builtin_aarch64_urhaddv4si_uuu (__a, __b);
 }
 
 __extension__ extern __inline int8x8_t