author     Wilco Dijkstra <wdijkstr@arm.com>   2020-03-06 18:29:02 +0000
committer  Wilco Dijkstra <wdijkstr@arm.com>   2020-03-06 18:29:02 +0000
commit     0b8393221177617f19e7c5c5c692b8c59f85fffb
tree       f6035c29b0c16c3c2ce9c4a0511327622bf47bd9
parent     3e5c062e96c11a6eaef1cbf94b5992391a850dbf
[AArch64] Use intrinsics for widening multiplies (PR91598)
Inline assembler instructions have no latency information and the scheduler does
not attempt to schedule them at all - it does not even honor the latencies of
asm source operands. As a result, SIMD intrinsics implemented with inline
assembler perform very poorly, particularly on in-order cores.
Add new patterns and intrinsics for widening multiplies, which results in a
63% speedup for the example in the PR, thus fixing the reported regression.
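
The PR's test case is not reproduced here. As an illustrative sketch only (the
loop and function name below are assumed, not taken from PR91598), a widening
multiply-accumulate kernel of this shape exercises the vmlal_lane_s16 path that
the patch converts from inline asm to a builtin:

#include <arm_neon.h>

/* Illustrative only: a widening multiply-accumulate reduction of the kind
   affected by PR91598 (not the PR's actual example).  Before this patch each
   vmlal_lane_s16 expanded to an opaque inline-asm block; with the patch it
   expands to the new aarch64_vec_smlal_lane_v4hi insn, which carries latency
   information the scheduler can use.  */
int32x4_t
mla_lane_kernel (const int16_t *a, int16x4_t coeff, int n)
{
  int32x4_t acc = vdupq_n_s32 (0);
  for (int i = 0; i + 4 <= n; i += 4)
    acc = vmlal_lane_s16 (acc, vld1_s16 (a + i), coeff, 0);
  return acc;
}

The 63% figure above refers to the PR's example; how much a given loop gains
depends on how much independent work the scheduler can overlap with the
multiply-accumulate chain.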
gcc/
PR target/91598
* config/aarch64/aarch64-builtins.c (TYPES_TERNOPU_LANE): Add define.
* config/aarch64/aarch64-simd.md
(aarch64_vec_<su>mult_lane<Qlane>): Add new insn for widening lane mul.
(aarch64_vec_<su>mlal_lane<Qlane>): Likewise.
* config/aarch64/aarch64-simd-builtins.def: Add intrinsics.
* config/aarch64/arm_neon.h:
(vmlal_lane_s16): Expand using intrinsics rather than inline asm.
(vmlal_lane_u16): Likewise.
(vmlal_lane_s32): Likewise.
(vmlal_lane_u32): Likewise.
(vmlal_laneq_s16): Likewise.
(vmlal_laneq_u16): Likewise.
(vmlal_laneq_s32): Likewise.
(vmlal_laneq_u32): Likewise.
(vmull_lane_s16): Likewise.
(vmull_lane_u16): Likewise.
(vmull_lane_s32): Likewise.
(vmull_lane_u32): Likewise.
(vmull_laneq_s16): Likewise.
(vmull_laneq_u16): Likewise.
(vmull_laneq_s32): Likewise.
(vmull_laneq_u32): Likewise.
* config/aarch64/iterators.md (Vcondtype): New iterator for lane mul.
(Qlane): Likewise.
 gcc/ChangeLog                                 |  28
 gcc/config/aarch64/aarch64-builtins.c         |   5
 gcc/config/aarch64/aarch64-simd-builtins.def  |   9
 gcc/config/aarch64/aarch64-simd.md            |  40
 gcc/config/aarch64/arm_neon.h                 | 296
 gcc/config/aarch64/iterators.md               |   7
 6 files changed, 185 insertions(+), 200 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 1cb6694..2bc6f39 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,33 @@
 2020-03-06  Wilco Dijkstra  <wdijkstr@arm.com>
 
+        PR target/91598
+        * config/aarch64/aarch64-builtins.c (TYPES_TERNOPU_LANE): Add define.
+        * config/aarch64/aarch64-simd.md
+        (aarch64_vec_<su>mult_lane<Qlane>): Add new insn for widening lane mul.
+        (aarch64_vec_<su>mlal_lane<Qlane>): Likewise.
+        * config/aarch64/aarch64-simd-builtins.def: Add intrinsics.
+        * config/aarch64/arm_neon.h:
+        (vmlal_lane_s16): Expand using intrinsics rather than inline asm.
+        (vmlal_lane_u16): Likewise.
+        (vmlal_lane_s32): Likewise.
+        (vmlal_lane_u32): Likewise.
+        (vmlal_laneq_s16): Likewise.
+        (vmlal_laneq_u16): Likewise.
+        (vmlal_laneq_s32): Likewise.
+        (vmlal_laneq_u32): Likewise.
+        (vmull_lane_s16): Likewise.
+        (vmull_lane_u16): Likewise.
+        (vmull_lane_s32): Likewise.
+        (vmull_lane_u32): Likewise.
+        (vmull_laneq_s16): Likewise.
+        (vmull_laneq_u16): Likewise.
+        (vmull_laneq_s32): Likewise.
+        (vmull_laneq_u32): Likewise.
+        * config/aarch64/iterators.md (Vcondtype): New iterator for lane mul.
+        (Qlane): Likewise.
+
+2020-03-06  Wilco Dijkstra  <wdijkstr@arm.com>
+
         * aarch64/aarch64-simd.md (aarch64_mla_elt<mode>): Correct lane syntax.
         (aarch64_mla_elt_<vswap_width_name><mode>): Likewise.
         (aarch64_mls_elt<mode>): Likewise.
diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index 9c9c6d8..5744e68 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -175,6 +175,11 @@ aarch64_types_ternopu_qualifiers[SIMD_MAX_BUILTIN_ARGS]
       qualifier_unsigned, qualifier_unsigned };
 #define TYPES_TERNOPU (aarch64_types_ternopu_qualifiers)
 static enum aarch64_type_qualifiers
+aarch64_types_ternopu_lane_qualifiers[SIMD_MAX_BUILTIN_ARGS]
+  = { qualifier_unsigned, qualifier_unsigned,
+      qualifier_unsigned, qualifier_lane_index };
+#define TYPES_TERNOPU_LANE (aarch64_types_ternopu_lane_qualifiers)
+static enum aarch64_type_qualifiers
 aarch64_types_ternopu_imm_qualifiers[SIMD_MAX_BUILTIN_ARGS]
   = { qualifier_unsigned, qualifier_unsigned,
       qualifier_unsigned, qualifier_immediate };
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index cc0bd0e..332a0b6 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -191,6 +191,15 @@
   BUILTIN_VQW (BINOP, vec_widen_smult_hi_, 10)
   BUILTIN_VQW (BINOPU, vec_widen_umult_hi_, 10)
 
+  BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_lane_, 0)
+  BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_lane_, 0)
+  BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_laneq_, 0)
+  BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_laneq_, 0)
+  BUILTIN_VD_HSI (TERNOPU_LANE, vec_umult_lane_, 0)
+  BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlal_lane_, 0)
+  BUILTIN_VD_HSI (TERNOPU_LANE, vec_umult_laneq_, 0)
+  BUILTIN_VD_HSI (QUADOPU_LANE, vec_umlal_laneq_, 0)
+
   BUILTIN_VSD_HSI (BINOP, sqdmull, 0)
   BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_lane, 0)
   BUILTIN_VSD_HSI (TERNOP_LANE, sqdmull_laneq, 0)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index e5cf4e4..24a11fb 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1892,6 +1892,46 @@
 }
 )
 
+;; vmull_lane_s16 intrinsics
+(define_insn "aarch64_vec_<su>mult_lane<Qlane>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (mult:<VWIDE>
+          (ANY_EXTEND:<VWIDE>
+            (match_operand:<VCOND> 1 "register_operand" "w"))
+          (ANY_EXTEND:<VWIDE>
+            (vec_duplicate:<VCOND>
+              (vec_select:<VEL>
+                (match_operand:VDQHS 2 "register_operand" "<vwx>")
+                (parallel [(match_operand:SI 3 "immediate_operand" "i")]))))))]
+  "TARGET_SIMD"
+  {
+    operands[3] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[3]));
+    return "<su>mull\\t%0.<Vwtype>, %1.<Vcondtype>, %2.<Vetype>[%3]";
+  }
+  [(set_attr "type" "neon_mul_<Vetype>_scalar_long")]
+)
+
+;; vmlal_lane_s16 intrinsics
+(define_insn "aarch64_vec_<su>mlal_lane<Qlane>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (plus:<VWIDE>
+          (mult:<VWIDE>
+            (ANY_EXTEND:<VWIDE>
+              (match_operand:<VCOND> 2 "register_operand" "w"))
+            (ANY_EXTEND:<VWIDE>
+              (vec_duplicate:<VCOND>
+                (vec_select:<VEL>
+                  (match_operand:VDQHS 3 "register_operand" "<vwx>")
+                  (parallel [(match_operand:SI 4 "immediate_operand" "i")])))))
+          (match_operand:<VWIDE> 1 "register_operand" "0")))]
+  "TARGET_SIMD"
+  {
+    operands[4] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[4]));
+    return "<su>mlal\\t%0.<Vwtype>, %2.<Vcondtype>, %3.<Vetype>[%4]";
+  }
+  [(set_attr "type" "neon_mla_<Vetype>_scalar_long")]
+)
+
 ;; FP vector operations.
 ;; AArch64 AdvSIMD supports single-precision (32-bit) and
 ;; double-precision (64-bit) floating-point data types and arithmetic as
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index cc4ce76..50f8b23 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7700,117 +7700,61 @@ vmlal_high_u32 (uint64x2_t __a, uint32x4_t __b, uint32x4_t __c)
   return __result;
 }
 
-#define vmlal_lane_s16(a, b, c, d) \
-  __extension__ \
-    ({ \
-       int16x4_t c_ = (c); \
-       int16x4_t b_ = (b); \
-       int32x4_t a_ = (a); \
-       int32x4_t result; \
-       __asm__ ("smlal %0.4s,%2.4h,%3.h[%4]" \
-                : "=w"(result) \
-                : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_lane_s16 (int32x4_t __acc, int16x4_t __a, int16x4_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_smlal_lane_v4hi (__acc, __a, __b, __c);
+}
 
-#define vmlal_lane_s32(a, b, c, d) \
-  __extension__ \
-    ({ \
-       int32x2_t c_ = (c); \
-       int32x2_t b_ = (b); \
-       int64x2_t a_ = (a); \
-       int64x2_t result; \
-       __asm__ ("smlal %0.2d,%2.2s,%3.s[%4]" \
-                : "=w"(result) \
-                : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_lane_s32 (int64x2_t __acc, int32x2_t __a, int32x2_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_smlal_lane_v2si (__acc, __a, __b, __c);
+}
 
-#define vmlal_lane_u16(a, b, c, d) \
-  __extension__ \
-    ({ \
-       uint16x4_t c_ = (c); \
-       uint16x4_t b_ = (b); \
-       uint32x4_t a_ = (a); \
-       uint32x4_t result; \
-       __asm__ ("umlal %0.4s,%2.4h,%3.h[%4]" \
-                : "=w"(result) \
-                : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_lane_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_umlal_lane_v4hi_uuuus (__acc, __a, __b, __c);
+}
 
-#define vmlal_lane_u32(a, b, c, d) \
-  __extension__ \
-    ({ \
-       uint32x2_t c_ = (c); \
-       uint32x2_t b_ = (b); \
-       uint64x2_t a_ = (a); \
-       uint64x2_t result; \
-       __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
-                : "=w"(result) \
-                : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_lane_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_umlal_lane_v2si_uuuus (__acc, __a, __b, __c);
+}
 
-#define vmlal_laneq_s16(a, b, c, d) \
-  __extension__ \
-    ({ \
-       int16x8_t c_ = (c); \
-       int16x4_t b_ = (b); \
-       int32x4_t a_ = (a); \
-       int32x4_t result; \
-       __asm__ ("smlal %0.4s, %2.4h, %3.h[%4]" \
-                : "=w"(result) \
-                : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_laneq_s16 (int32x4_t __acc, int16x4_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_smlal_laneq_v4hi (__acc, __a, __b, __c);
+}
 
-#define vmlal_laneq_s32(a, b, c, d) \
-  __extension__ \
-    ({ \
-       int32x4_t c_ = (c); \
-       int32x2_t b_ = (b); \
-       int64x2_t a_ = (a); \
-       int64x2_t result; \
-       __asm__ ("smlal %0.2d, %2.2s, %3.s[%4]" \
-                : "=w"(result) \
-                : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_laneq_s32 (int64x2_t __acc, int32x2_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_smlal_laneq_v2si (__acc, __a, __b, __c);
+}
 
-#define vmlal_laneq_u16(a, b, c, d) \
-  __extension__ \
-    ({ \
-       uint16x8_t c_ = (c); \
-       uint16x4_t b_ = (b); \
-       uint32x4_t a_ = (a); \
-       uint32x4_t result; \
-       __asm__ ("umlal %0.4s, %2.4h, %3.h[%4]" \
-                : "=w"(result) \
-                : "0"(a_), "w"(b_), "x"(c_), "i"(d) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_laneq_u16 (uint32x4_t __acc, uint16x4_t __a, uint16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_umlal_laneq_v4hi_uuuus (__acc, __a, __b, __c);
+}
 
-#define vmlal_laneq_u32(a, b, c, d) \
-  __extension__ \
-    ({ \
-       uint32x4_t c_ = (c); \
-       uint32x2_t b_ = (b); \
-       uint64x2_t a_ = (a); \
-       uint64x2_t result; \
-       __asm__ ("umlal %0.2d, %2.2s, %3.s[%4]" \
-                : "=w"(result) \
-                : "0"(a_), "w"(b_), "w"(c_), "i"(d) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmlal_laneq_u32 (uint64x2_t __acc, uint32x2_t __a, uint32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_umlal_laneq_v2si_uuuus (__acc, __a, __b, __c);
+}
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -9289,109 +9233,61 @@ vmull_high_u32 (uint32x4_t __a, uint32x4_t __b)
   return __builtin_aarch64_vec_widen_umult_hi_v4si_uuu (__a, __b);
 }
 
-#define vmull_lane_s16(a, b, c) \
-  __extension__ \
-    ({ \
-       int16x4_t b_ = (b); \
-       int16x4_t a_ = (a); \
-       int32x4_t result; \
-       __asm__ ("smull %0.4s,%1.4h,%2.h[%3]" \
-                : "=w"(result) \
-                : "w"(a_), "x"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_lane_s16 (int16x4_t __a, int16x4_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_smult_lane_v4hi (__a, __b, __c);
+}
 
-#define vmull_lane_s32(a, b, c) \
-  __extension__ \
-    ({ \
-       int32x2_t b_ = (b); \
-       int32x2_t a_ = (a); \
-       int64x2_t result; \
-       __asm__ ("smull %0.2d,%1.2s,%2.s[%3]" \
-                : "=w"(result) \
-                : "w"(a_), "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_lane_s32 (int32x2_t __a, int32x2_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_smult_lane_v2si (__a, __b, __c);
+}
 
-#define vmull_lane_u16(a, b, c) \
-  __extension__ \
-    ({ \
-       uint16x4_t b_ = (b); \
-       uint16x4_t a_ = (a); \
-       uint32x4_t result; \
-       __asm__ ("umull %0.4s,%1.4h,%2.h[%3]" \
-                : "=w"(result) \
-                : "w"(a_), "x"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_lane_u16 (uint16x4_t __a, uint16x4_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_umult_lane_v4hi_uuus (__a, __b, __c);
+}
 
-#define vmull_lane_u32(a, b, c) \
-  __extension__ \
-    ({ \
-       uint32x2_t b_ = (b); \
-       uint32x2_t a_ = (a); \
-       uint64x2_t result; \
-       __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
-                : "=w"(result) \
-                : "w"(a_), "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_lane_u32 (uint32x2_t __a, uint32x2_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_umult_lane_v2si_uuus (__a, __b, __c);
+}
 
-#define vmull_laneq_s16(a, b, c) \
-  __extension__ \
-    ({ \
-       int16x8_t b_ = (b); \
-       int16x4_t a_ = (a); \
-       int32x4_t result; \
-       __asm__ ("smull %0.4s, %1.4h, %2.h[%3]" \
-                : "=w"(result) \
-                : "w"(a_), "x"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_laneq_s16 (int16x4_t __a, int16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_smult_laneq_v4hi (__a, __b, __c);
+}
 
-#define vmull_laneq_s32(a, b, c) \
-  __extension__ \
-    ({ \
-       int32x4_t b_ = (b); \
-       int32x2_t a_ = (a); \
-       int64x2_t result; \
-       __asm__ ("smull %0.2d, %1.2s, %2.s[%3]" \
-                : "=w"(result) \
-                : "w"(a_), "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline int64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_laneq_s32 (int32x2_t __a, int32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_smult_laneq_v2si (__a, __b, __c);
+}
 
-#define vmull_laneq_u16(a, b, c) \
-  __extension__ \
-    ({ \
-       uint16x8_t b_ = (b); \
-       uint16x4_t a_ = (a); \
-       uint32x4_t result; \
-       __asm__ ("umull %0.4s, %1.4h, %2.h[%3]" \
-                : "=w"(result) \
-                : "w"(a_), "x"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint32x4_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_laneq_u16 (uint16x4_t __a, uint16x8_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_umult_laneq_v4hi_uuus (__a, __b, __c);
+}
 
-#define vmull_laneq_u32(a, b, c) \
-  __extension__ \
-    ({ \
-       uint32x4_t b_ = (b); \
-       uint32x2_t a_ = (a); \
-       uint64x2_t result; \
-       __asm__ ("umull %0.2d, %1.2s, %2.s[%3]" \
-                : "=w"(result) \
-                : "w"(a_), "w"(b_), "i"(c) \
-                : /* No clobbers */); \
-       result; \
-     })
+__extension__ extern __inline uint64x2_t
+__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
+vmull_laneq_u32 (uint32x2_t __a, uint32x4_t __b, const int __c)
+{
+  return __builtin_aarch64_vec_umult_laneq_v2si_uuus (__a, __b, __c);
+}
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index b56a050..95fa3e4 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -986,6 +986,13 @@
                        (V4SF "4s") (V2DF "2d")
                        (V4HF "4h") (V8HF "8h")])
 
+;; Map mode to type used in widening multiplies.
+(define_mode_attr Vcondtype [(V4HI "4h") (V8HI "4h") (V2SI "2s") (V4SI "2s")])
+
+;; Map lane mode to name
+(define_mode_attr Qlane [(V4HI "_v4hi") (V8HI "q_v4hi")
+                         (V2SI "_v2si") (V4SI "q_v2si")])
+
 (define_mode_attr Vrevsuff [(V4HI "16") (V8HI "16") (V2SI "32")
                             (V4SI "32") (V2DI "64")])
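
A closing note on the new mode attributes: Vcondtype maps both the 64-bit and
128-bit lane-vector modes to the same suffix (V4HI and V8HI both give "4h")
because the non-lane multiplicand of these widening multiplies is always a
64-bit vector, while Qlane supplies the "q" used in the laneq builtin and
pattern names. A minimal usage sketch (the function name is made up, and the
expected instruction in the comment is inferred from the
"umull %0.4s, %1.4h, %2.h[%3]" template removed above):

#include <arm_neon.h>

/* Sketch: the laneq form takes the lane from a 128-bit vector while still
   multiplying a 64-bit vector.  With this patch it goes through
   __builtin_aarch64_vec_umult_laneq_v4hi_uuus and is expected to assemble
   to something like:  umull  v0.4s, v0.4h, v1.h[7]  */
uint32x4_t
widen_by_lane7 (uint16x4_t a, uint16x8_t table)
{
  return vmull_laneq_u16 (a, table, 7);
}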