author		Jonathan Wright <jonathan.wright@arm.com>	2021-02-02 01:31:32 +0000
committer	Jonathan Wright <jonathan.wright@arm.com>	2021-02-03 14:01:15 +0000
commit		9a00ff96fad209ebde56b227d313cad5d769dc55 (patch)
tree		308dbfa2da68f7caa25034c945720d990bc43827 /gcc
parent		b2c4cf7b19d2441307132727dde0fb63f27d1530 (diff)
aarch64: Use RTL builtins for [su]mlal_high_lane[q] intrinsics
Rewrite [su]mlal_high_lane[q] Neon intrinsics to use RTL builtins rather
than inline assembly code, allowing for better scheduling and
optimization.

gcc/ChangeLog:

2021-02-02  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add [su]mlal_hi_lane[q]
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_<su>mlal_hi_lane<mode>_insn):
	Define.
	(aarch64_<su>mlal_hi_lane<mode>): Define.
	(aarch64_<su>mlal_hi_laneq<mode>_insn): Define.
	(aarch64_<su>mlal_hi_laneq<mode>): Define.
	* config/aarch64/arm_neon.h (vmlal_high_lane_s16): Use RTL builtin
	instead of inline asm.
	(vmlal_high_lane_s32): Likewise.
	(vmlal_high_lane_u16): Likewise.
	(vmlal_high_lane_u32): Likewise.
	(vmlal_high_laneq_s16): Likewise.
	(vmlal_high_laneq_s32): Likewise.
	(vmlal_high_laneq_u16): Likewise.
	(vmlal_high_laneq_u32): Likewise.
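As a quick usage sketch (illustration only, not part of the patch; the
wrapper function name below is hypothetical), this is how one of the
rewritten intrinsics is called. After this change the call expands
through the new RTL builtin instead of an opaque inline-asm block, so
GCC can schedule the resulting smlal2 instruction together with
surrounding code:

#include <arm_neon.h>

/* Hypothetical wrapper: widen the high half of b (elements 4..7),
   multiply each element by lane 1 of v, and accumulate into acc.
   Expected to lower to a single "smlal2 v0.4s, v1.8h, v2.h[1]".  */
int32x4_t
mla_high_lane1 (int32x4_t acc, int16x8_t b, int16x4_t v)
{
  return vmlal_high_lane_s16 (acc, b, v, 1);
}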
Diffstat (limited to 'gcc')
-rw-r--r--	gcc/config/aarch64/aarch64-simd-builtins.def	  5
-rw-r--r--	gcc/config/aarch64/aarch64-simd.md	 70
-rw-r--r--	gcc/config/aarch64/arm_neon.h	160
3 files changed, 131 insertions, 104 deletions
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 319cd64..55cc2a4 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -314,6 +314,11 @@
BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlsl_lane_, 0, NONE)
BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlsl_laneq_, 0, NONE)
+ BUILTIN_VQ_HSI (QUADOP_LANE, smlal_hi_lane, 0, NONE)
+ BUILTIN_VQ_HSI (QUADOP_LANE, smlal_hi_laneq, 0, NONE)
+ BUILTIN_VQ_HSI (QUADOPU_LANE, umlal_hi_lane, 0, NONE)
+ BUILTIN_VQ_HSI (QUADOPU_LANE, umlal_hi_laneq, 0, NONE)
+
BUILTIN_VSD_HSI (BINOP, sqdmull, 0, NONE)
BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_lane, 0, NONE)
BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_laneq, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index fd506bc..60ecd13 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2287,6 +2287,76 @@
[(set_attr "type" "neon_mla_<Vetype>_scalar_long")]
)
+(define_insn "aarch64_<su>mlal_hi_lane<mode>_insn"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (plus:<VWIDE>
+ (mult:<VWIDE>
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_duplicate:<VHALF>
+ (vec_select:<VEL>
+ (match_operand:<VCOND> 4 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 5 "immediate_operand" "i")])))))
+ (match_operand:<VWIDE> 1 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[5] = aarch64_endian_lane_rtx (<VCOND>mode, INTVAL (operands[5]));
+ return "<su>mlal2\\t%0.<Vwtype>, %2.<Vtype>, %4.<Vetype>[%5]";
+ }
+ [(set_attr "type" "neon_mla_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_<su>mlal_hi_lane<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand")
+ (match_operand:<VWIDE> 1 "register_operand")
+ (ANY_EXTEND:<VWIDE>(match_operand:VQ_HSI 2 "register_operand"))
+ (match_operand:<VCOND> 3 "register_operand")
+ (match_operand:SI 4 "immediate_operand")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
+ emit_insn (gen_aarch64_<su>mlal_hi_lane<mode>_insn (operands[0],
+ operands[1], operands[2], p, operands[3], operands[4]));
+ DONE;
+}
+)
+
+(define_insn "aarch64_<su>mlal_hi_laneq<mode>_insn"
+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+ (plus:<VWIDE>
+ (mult:<VWIDE>
+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
+ (match_operand:VQ_HSI 2 "register_operand" "w")
+ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
+ (ANY_EXTEND:<VWIDE> (vec_duplicate:<VHALF>
+ (vec_select:<VEL>
+ (match_operand:<VCONQ> 4 "register_operand" "<vwx>")
+ (parallel [(match_operand:SI 5 "immediate_operand" "i")])))))
+ (match_operand:<VWIDE> 1 "register_operand" "0")))]
+ "TARGET_SIMD"
+ {
+ operands[5] = aarch64_endian_lane_rtx (<VCONQ>mode, INTVAL (operands[5]));
+ return "<su>mlal2\\t%0.<Vwtype>, %2.<Vtype>, %4.<Vetype>[%5]";
+ }
+ [(set_attr "type" "neon_mla_<Vetype>_scalar_long")]
+)
+
+(define_expand "aarch64_<su>mlal_hi_laneq<mode>"
+ [(match_operand:<VWIDE> 0 "register_operand")
+ (match_operand:<VWIDE> 1 "register_operand")
+ (ANY_EXTEND:<VWIDE>(match_operand:VQ_HSI 2 "register_operand"))
+ (match_operand:<VCONQ> 3 "register_operand")
+ (match_operand:SI 4 "immediate_operand")]
+ "TARGET_SIMD"
+{
+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
+ emit_insn (gen_aarch64_<su>mlal_hi_laneq<mode>_insn (operands[0],
+ operands[1], operands[2], p, operands[3], operands[4]));
+ DONE;
+}
+)
+
(define_insn "aarch64_vec_<su>mlsl_lane<Qlane>"
[(set (match_operand:<VWIDE> 0 "register_operand" "=w")
(minus:<VWIDE>
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 11e6ca5..bfe6b2b 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7152,117 +7152,69 @@ vmla_u32 (uint32x2_t __a, uint32x2_t __b, uint32x2_t __c)
(int32x2_t) __c);
}
-#define vmlal_high_lane_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x4_t c_ = (c); \
- int16x8_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_high_lane_s16(int32x4_t __a, int16x8_t __b, int16x4_t __v,
+ const int __lane)
+{
+ return __builtin_aarch64_smlal_hi_lanev8hi (__a, __b, __v, __lane);
+}
-#define vmlal_high_lane_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x2_t c_ = (c); \
- int32x4_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_high_lane_s32(int64x2_t __a, int32x4_t __b, int32x2_t __v,
+ const int __lane)
+{
+ return __builtin_aarch64_smlal_hi_lanev4si (__a, __b, __v, __lane);
+}
-#define vmlal_high_lane_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x4_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_high_lane_u16(uint32x4_t __a, uint16x8_t __b, uint16x4_t __v,
+ const int __lane)
+{
+ return __builtin_aarch64_umlal_hi_lanev8hi_uuuus (__a, __b, __v, __lane);
+}
-#define vmlal_high_lane_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x2_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_high_lane_u32(uint64x2_t __a, uint32x4_t __b, uint32x2_t __v,
+ const int __lane)
+{
+ return __builtin_aarch64_umlal_hi_lanev4si_uuuus (__a, __b, __v, __lane);
+}
-#define vmlal_high_laneq_s16(a, b, c, d) \
- __extension__ \
- ({ \
- int16x8_t c_ = (c); \
- int16x8_t b_ = (b); \
- int32x4_t a_ = (a); \
- int32x4_t result; \
- __asm__ ("smlal2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_high_laneq_s16(int32x4_t __a, int16x8_t __b, int16x8_t __v,
+ const int __lane)
+{
+ return __builtin_aarch64_smlal_hi_laneqv8hi (__a, __b, __v, __lane);
+}
-#define vmlal_high_laneq_s32(a, b, c, d) \
- __extension__ \
- ({ \
- int32x4_t c_ = (c); \
- int32x4_t b_ = (b); \
- int64x2_t a_ = (a); \
- int64x2_t result; \
- __asm__ ("smlal2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_high_laneq_s32(int64x2_t __a, int32x4_t __b, int32x4_t __v,
+ const int __lane)
+{
+ return __builtin_aarch64_smlal_hi_laneqv4si (__a, __b, __v, __lane);
+}
-#define vmlal_high_laneq_u16(a, b, c, d) \
- __extension__ \
- ({ \
- uint16x8_t c_ = (c); \
- uint16x8_t b_ = (b); \
- uint32x4_t a_ = (a); \
- uint32x4_t result; \
- __asm__ ("umlal2 %0.4s, %2.8h, %3.h[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_high_laneq_u16(uint32x4_t __a, uint16x8_t __b, uint16x8_t __v,
+ const int __lane)
+{
+ return __builtin_aarch64_umlal_hi_laneqv8hi_uuuus (__a, __b, __v, __lane);
+}
-#define vmlal_high_laneq_u32(a, b, c, d) \
- __extension__ \
- ({ \
- uint32x4_t c_ = (c); \
- uint32x4_t b_ = (b); \
- uint64x2_t a_ = (a); \
- uint64x2_t result; \
- __asm__ ("umlal2 %0.2d, %2.4s, %3.s[%4]" \
- : "=w"(result) \
- : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
- : /* No clobbers */); \
- result; \
- })
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_high_laneq_u32(uint64x2_t __a, uint32x4_t __b, uint32x4_t __v,
+ const int __lane)
+{
+ return __builtin_aarch64_umlal_hi_laneqv4si_uuuus (__a, __b, __v, __lane);
+}
__extension__ extern __inline int32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))