aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJonathan Wright <jonathan.wright@arm.com>2021-01-14 18:38:07 +0000
committerJonathan Wright <jonathan.wright@arm.com>2021-01-22 23:18:11 +0000
commit16b7b8a32d430c23c3913aa2c04998fadb7cf273 (patch)
treec180429372b603f494e1b3ab947efc5ee722f804
parent89100826acec92dfaa6ab8f2646b8053e7dbc67c (diff)
downloadgcc-16b7b8a32d430c23c3913aa2c04998fadb7cf273.zip
gcc-16b7b8a32d430c23c3913aa2c04998fadb7cf273.tar.gz
gcc-16b7b8a32d430c23c3913aa2c04998fadb7cf273.tar.bz2
aarch64: Use RTL builtins for integer mla intrinsics
Rewrite integer mla Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and optimization.

gcc/ChangeLog:

2021-01-14  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add mla builtin
	generator macro.
	* config/aarch64/arm_neon.h (vmla_s8): Use RTL builtin rather
	than asm.
	(vmla_s16): Likewise.
	(vmla_s32): Likewise.
	(vmla_u8): Likewise.
	(vmla_u16): Likewise.
	(vmla_u32): Likewise.
	(vmlaq_s8): Likewise.
	(vmlaq_s16): Likewise.
	(vmlaq_s32): Likewise.
	(vmlaq_u8): Likewise.
	(vmlaq_u16): Likewise.
	(vmlaq_u32): Likewise.
-rw-r--r--gcc/config/aarch64/aarch64-simd-builtins.def3
-rw-r--r--gcc/config/aarch64/arm_neon.h96
2 files changed, 27 insertions, 72 deletions
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 168e47d..a233156 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -178,6 +178,9 @@
/* Implemented by aarch64_xtn<mode>. */
BUILTIN_VQN (UNOP, xtn, 0, NONE)
+ /* Implemented by aarch64_mla<mode>. */
+ BUILTIN_VDQ_BHSI (TERNOP, mla, 0, NONE)
+
/* Implemented by aarch64_<su>mlsl<mode>. */
BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index f7efee6..15fb345 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7294,72 +7294,48 @@ __extension__ extern __inline int8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s8 (int8x8_t __a, int8x8_t __b, int8x8_t __c)
{
- int8x8_t __result;
- __asm__ ("mla %0.8b, %2.8b, %3.8b"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_mlav8qi (__a, __b, __c);
}
__extension__ extern __inline int16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s16 (int16x4_t __a, int16x4_t __b, int16x4_t __c)
{
- int16x4_t __result;
- __asm__ ("mla %0.4h, %2.4h, %3.4h"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_mlav4hi (__a, __b, __c);
}
__extension__ extern __inline int32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_s32 (int32x2_t __a, int32x2_t __b, int32x2_t __c)
{
- int32x2_t __result;
- __asm__ ("mla %0.2s, %2.2s, %3.2s"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_mlav2si (__a, __b, __c);
}
__extension__ extern __inline uint8x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u8 (uint8x8_t __a, uint8x8_t __b, uint8x8_t __c)
{
- uint8x8_t __result;
- __asm__ ("mla %0.8b, %2.8b, %3.8b"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return (uint8x8_t) __builtin_aarch64_mlav8qi ((int8x8_t) __a,
+ (int8x8_t) __b,
+ (int8x8_t) __c);
}
__extension__ extern __inline uint16x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u16 (uint16x4_t __a, uint16x4_t __b, uint16x4_t __c)
{
- uint16x4_t __result;
- __asm__ ("mla %0.4h, %2.4h, %3.4h"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return (uint16x4_t) __builtin_aarch64_mlav4hi ((int16x4_t) __a,
+ (int16x4_t) __b,
+ (int16x4_t) __c);
}
__extension__ extern __inline uint32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
{
- uint32x2_t __result;
- __asm__ ("mla %0.2s, %2.2s, %3.2s"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return (uint32x2_t) __builtin_aarch64_mlav2si ((int32x2_t) __a,
+ (int32x2_t) __b,
+ (int32x2_t) __c);
}
#define vmlal_high_lane_s16(a, b, c, d) \
@@ -7835,72 +7811,48 @@ __extension__ extern __inline int8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s8 (int8x16_t __a, int8x16_t __b, int8x16_t __c)
{
- int8x16_t __result;
- __asm__ ("mla %0.16b, %2.16b, %3.16b"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_mlav16qi (__a, __b, __c);
}
__extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s16 (int16x8_t __a, int16x8_t __b, int16x8_t __c)
{
- int16x8_t __result;
- __asm__ ("mla %0.8h, %2.8h, %3.8h"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_mlav8hi (__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_s32 (int32x4_t __a, int32x4_t __b, int32x4_t __c)
{
- int32x4_t __result;
- __asm__ ("mla %0.4s, %2.4s, %3.4s"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return __builtin_aarch64_mlav4si (__a, __b, __c);
}
__extension__ extern __inline uint8x16_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u8 (uint8x16_t __a, uint8x16_t __b, uint8x16_t __c)
{
- uint8x16_t __result;
- __asm__ ("mla %0.16b, %2.16b, %3.16b"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return (uint8x16_t) __builtin_aarch64_mlav16qi ((int8x16_t) __a,
+ (int8x16_t) __b,
+ (int8x16_t) __c);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u16 (uint16x8_t __a, uint16x8_t __b, uint16x8_t __c)
{
- uint16x8_t __result;
- __asm__ ("mla %0.8h, %2.8h, %3.8h"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return (uint16x8_t) __builtin_aarch64_mlav8hi ((int16x8_t) __a,
+ (int16x8_t) __b,
+ (int16x8_t) __c);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_u32 (uint32x4_t __a, uint32x4_t __b, uint32x4_t __c)
{
- uint32x4_t __result;
- __asm__ ("mla %0.4s, %2.4s, %3.4s"
- : "=w"(__result)
- : "0"(__a), "w"(__b), "w"(__c)
- : /* No clobbers */);
- return __result;
+ return (uint32x4_t) __builtin_aarch64_mlav4si ((int32x4_t) __a,
+ (int32x4_t) __b,
+ (int32x4_t) __c);
}
__extension__ extern __inline float32x2_t