author     Jonathan Wright <jonathan.wright@arm.com>  2021-01-27 14:55:45 +0000
committer  Jonathan Wright <jonathan.wright@arm.com>  2021-02-03 13:33:50 +0000
commit     719877b079678f68f8acb3b4202432e93cd2c5a0 (patch)
tree       5b4ec735ba9b6a1183f1f383fdff5073073543b5 /gcc
parent     6dc82826ba61b25855e5a79f4479d009395a7299 (diff)
aarch64: Use RTL builtins for [su]mlal_high intrinsics
Rewrite [su]mlal_high Neon intrinsics to use RTL builtins rather than
inline assembly code, allowing for better scheduling and optimization.

gcc/ChangeLog:

2021-01-27  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add RTL builtin
	generator macros.
	* config/aarch64/aarch64-simd.md (*aarch64_<su>mlal_hi<mode>):
	Rename to...
	(aarch64_<su>mlal_hi<mode>_insn): This.
	(aarch64_<su>mlal_hi<mode>): Define.
	* config/aarch64/arm_neon.h (vmlal_high_s8): Use RTL builtin
	instead of inline asm.
	(vmlal_high_s16): Likewise.
	(vmlal_high_s32): Likewise.
	(vmlal_high_u8): Likewise.
	(vmlal_high_u16): Likewise.
	(vmlal_high_u32): Likewise.
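For context, a brief illustration of what these intrinsics compute (an editorial sketch, not part of the commit; the function name is hypothetical):

#include <arm_neon.h>

int32x4_t
mla_high_example (int32x4_t a, int16x8_t b, int16x8_t c)
{
  /* Lane by lane: result[i] = a[i] + (int32_t) b[4 + i] * (int32_t) c[4 + i]
     for i = 0..3 -- the high four lanes of b and c are widened, multiplied,
     and accumulated into a, which maps to a single SMLAL2 instruction.  */
  return vmlal_high_s16 (a, b, c);
}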
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/aarch64/aarch64-simd-builtins.def |  4 ++++
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md           | 16 +++++++++++++++-
-rw-r--r--  gcc/config/aarch64/arm_neon.h                | 42 ++++++------------------------------------
3 files changed, 25 insertions(+), 37 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 48e481c..8cf5737 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -240,6 +240,10 @@
  BUILTIN_VQW (TERNOP, smlsl_hi, 0, NONE)
  BUILTIN_VQW (TERNOPU, umlsl_hi, 0, NONE)

+  /* Implemented by aarch64_<su>mlal_hi<mode>.  */
+  BUILTIN_VQW (TERNOP, smlal_hi, 0, NONE)
+  BUILTIN_VQW (TERNOPU, umlal_hi, 0, NONE)
+
  BUILTIN_VSQN_HSDI (UNOPUS, sqmovun, 0, NONE)

  /* Implemented by aarch64_sqxtun2<mode>.  */
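As background (an editorial annotation, not part of the patch): each BUILTIN_VQW entry instantiates its builtin once per quad-word integer mode (V16QI, V8HI, V4SI), producing the __builtin_aarch64_* names that arm_neon.h calls in the hunks below; the unsigned TERNOPU variants carry a _uuuu type suffix. Under those assumptions:

#include <arm_neon.h>

int32x4_t
smlal_hi_builtin_example (int32x4_t a, int16x8_t b, int16x8_t c)
{
  /* The TERNOP smlal_hi entry in V8HImode generates this builtin,
     exactly as used by vmlal_high_s16 further down.  */
  return __builtin_aarch64_smlal_hiv8hi (a, b, c);
}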
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 767d673..d10cc2a 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1976,7 +1976,7 @@
[(set_attr "type" "neon_mla_<Vetype>_long")]
)
-(define_insn "*aarch64_<su>mlal_hi<mode>"
+(define_insn "aarch64_<su>mlal_hi<mode>_insn"
  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
        (plus:<VWIDE>
          (mult:<VWIDE>
@@ -1992,6 +1992,20 @@
[(set_attr "type" "neon_mla_<Vetype>_long")]
)
+(define_expand "aarch64_<su>mlal_hi<mode>"
+  [(match_operand:<VWIDE> 0 "register_operand")
+   (match_operand:<VWIDE> 1 "register_operand")
+   (ANY_EXTEND:<VWIDE>(match_operand:VQW 2 "register_operand"))
+   (match_operand:VQW 3 "register_operand")]
+  "TARGET_SIMD"
+{
+  rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
+  emit_insn (gen_aarch64_<su>mlal_hi<mode>_insn (operands[0], operands[1],
+                                                 operands[2], p, operands[3]));
+  DONE;
+}
+)
+
(define_insn "*aarch64_<su>mlsl_lo<mode>"
  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
        (minus:<VWIDE>
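A note on the helper used by the new expander (an editorial gloss on GCC internals, hedged; not part of the diff): aarch64_simd_vect_par_cnst_half builds the constant lane-index parallel that the renamed insn's vect_par_cnst_hi_half predicate matches, with the third argument selecting the high half. Sketched for V8HImode on a little-endian target:

/* aarch64_simd_vect_par_cnst_half (V8HImode, 8, true) should yield
   the lane selector for the upper half:

     (parallel [(const_int 4) (const_int 5)
                (const_int 6) (const_int 7)])

   The expander passes it as the vec_select operand of
   aarch64_<su>mlal_hi<mode>_insn, so the top 64 bits of each 128-bit
   source feed the widening multiply-accumulate.  */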
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 4b905d9..e994aa1 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7316,72 +7316,42 @@ __extension__ extern __inline int16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s8 (int16x8_t __a, int8x16_t __b, int8x16_t __c)
{
-  int16x8_t __result;
-  __asm__ ("smlal2 %0.8h,%2.16b,%3.16b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_hiv16qi (__a, __b, __c);
}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s16 (int32x4_t __a, int16x8_t __b, int16x8_t __c)
{
-  int32x4_t __result;
-  __asm__ ("smlal2 %0.4s,%2.8h,%3.8h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_hiv8hi (__a, __b, __c);
}
__extension__ extern __inline int64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_s32 (int64x2_t __a, int32x4_t __b, int32x4_t __c)
{
-  int64x2_t __result;
-  __asm__ ("smlal2 %0.2d,%2.4s,%3.4s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlal_hiv4si (__a, __b, __c);
}
__extension__ extern __inline uint16x8_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u8 (uint16x8_t __a, uint8x16_t __b, uint8x16_t __c)
{
-  uint16x8_t __result;
-  __asm__ ("umlal2 %0.8h,%2.16b,%3.16b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_hiv16qi_uuuu (__a, __b, __c);
}
__extension__ extern __inline uint32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u16 (uint32x4_t __a, uint16x8_t __b, uint16x8_t __c)
{
-  uint32x4_t __result;
-  __asm__ ("umlal2 %0.4s,%2.8h,%3.8h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_hiv8hi_uuuu (__a, __b, __c);
}
__extension__ extern __inline uint64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
{
-  uint64x2_t __result;
-  __asm__ ("umlal2 %0.2d,%2.4s,%3.4s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlal_hiv4si_uuuu (__a, __b, __c);
}
__extension__ extern __inline int32x4_t