diff options
author | Jonathan Wright <jonathan.wright@arm.com> | 2021-02-04 23:00:00 +0000 |
---|---|---|
committer | Jonathan Wright <jonathan.wright@arm.com> | 2021-04-30 18:40:22 +0100 |
commit | f546e0d3d0316aa76a45de1f548591bde7308c41 (patch) | |
tree | 91cb2cb9a9545cb19a8601ade47e8496895f8e5a | |
parent | 5672fe9da4ab4e8787c288b64008251065c67c98 (diff) | |
download | gcc-f546e0d3d0316aa76a45de1f548591bde7308c41.zip gcc-f546e0d3d0316aa76a45de1f548591bde7308c41.tar.gz gcc-f546e0d3d0316aa76a45de1f548591bde7308c41.tar.bz2 |
aarch64: Use RTL builtins for vmull[_high]_p8 intrinsics
Rewrite vmull[_high]_p8 Neon intrinsics to use RTL builtins rather
than inline assembly code, allowing for better scheduling and
optimization.
gcc/ChangeLog:
2021-02-05 Jonathan Wright <jonathan.wright@arm.com>
* config/aarch64/aarch64-simd-builtins.def: Add pmull[2]
builtin generator macros.
* config/aarch64/aarch64-simd.md (aarch64_pmullv8qi): Define.
(aarch64_pmull_hiv16qi_insn): Define.
(aarch64_pmull_hiv16qi): Define.
* config/aarch64/arm_neon.h (vmull_high_p8): Use RTL builtin
instead of inline asm.
(vmull_p8): Likewise.
-rw-r--r-- | gcc/config/aarch64/aarch64-simd-builtins.def | 2 | ||||
-rw-r--r-- | gcc/config/aarch64/aarch64-simd.md | 38 | ||||
-rw-r--r-- | gcc/config/aarch64/arm_neon.h | 16 |
3 files changed, 44 insertions, 12 deletions
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def index 337ec8d..5d4c01f 100644 --- a/gcc/config/aarch64/aarch64-simd-builtins.def +++ b/gcc/config/aarch64/aarch64-simd-builtins.def @@ -46,6 +46,8 @@ BUILTIN_VDC (COMBINE, combine, 0, AUTO_FP) VAR1 (COMBINEP, combine, 0, NONE, di) BUILTIN_VB (BINOP, pmul, 0, NONE) + VAR1 (BINOP, pmull, 0, NONE, v8qi) + VAR1 (BINOP, pmull_hi, 0, NONE, v16qi) BUILTIN_VHSDF_HSDF (BINOP, fmulx, 0, FP) BUILTIN_VHSDF_DF (UNOP, sqrt, 2, FP) BUILTIN_VDQ_I (BINOP, addp, 0, NONE) diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index fbfed33..65e6390 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -4471,6 +4471,44 @@ [(set_attr "type" "neon_mul_<Vetype><q>")] ) +(define_insn "aarch64_pmullv8qi" + [(set (match_operand:V8HI 0 "register_operand" "=w") + (unspec:V8HI [(match_operand:V8QI 1 "register_operand" "w") + (match_operand:V8QI 2 "register_operand" "w")] + UNSPEC_PMULL))] + "TARGET_SIMD" + "pmull\\t%0.8h, %1.8b, %2.8b" + [(set_attr "type" "neon_mul_b_long")] +) + +(define_insn "aarch64_pmull_hiv16qi_insn" + [(set (match_operand:V8HI 0 "register_operand" "=w") + (unspec:V8HI + [(vec_select:V8QI + (match_operand:V16QI 1 "register_operand" "w") + (match_operand:V16QI 3 "vect_par_cnst_hi_half" "")) + (vec_select:V8QI + (match_operand:V16QI 2 "register_operand" "w") + (match_dup 3))] + UNSPEC_PMULL))] + "TARGET_SIMD" + "pmull2\\t%0.8h, %1.16b, %2.16b" + [(set_attr "type" "neon_mul_b_long")] +) + +(define_expand "aarch64_pmull_hiv16qi" + [(match_operand:V8HI 0 "register_operand") + (match_operand:V16QI 1 "register_operand") + (match_operand:V16QI 2 "register_operand")] + "TARGET_SIMD" + { + rtx p = aarch64_simd_vect_par_cnst_half (V16QImode, 16, true); + emit_insn (gen_aarch64_pmull_hiv16qi_insn (operands[0], operands[1], + operands[2], p)); + DONE; + } +) + ;; fmulx. 
(define_insn "aarch64_fmulx<mode>" diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h index 4b8ec52..bde2d17 100644 --- a/gcc/config/aarch64/arm_neon.h +++ b/gcc/config/aarch64/arm_neon.h @@ -8228,12 +8228,8 @@ __extension__ extern __inline poly16x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_high_p8 (poly8x16_t __a, poly8x16_t __b) { - poly16x8_t __result; - __asm__ ("pmull2 %0.8h,%1.16b,%2.16b" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return (poly16x8_t) __builtin_aarch64_pmull_hiv16qi ((int8x16_t) __a, + (int8x16_t) __b); } __extension__ extern __inline int16x8_t @@ -8366,12 +8362,8 @@ __extension__ extern __inline poly16x8_t __attribute__ ((__always_inline__, __gnu_inline__, __artificial__)) vmull_p8 (poly8x8_t __a, poly8x8_t __b) { - poly16x8_t __result; - __asm__ ("pmull %0.8h, %1.8b, %2.8b" - : "=w"(__result) - : "w"(__a), "w"(__b) - : /* No clobbers */); - return __result; + return (poly16x8_t) __builtin_aarch64_pmullv8qi ((int8x8_t) __a, + (int8x8_t) __b); } __extension__ extern __inline int16x8_t |