| author | Jonathan Wright <jonathan.wright@arm.com> | 2021-01-18 12:42:52 +0000 |
|---|---|---|
| committer | Jonathan Wright <jonathan.wright@arm.com> | 2021-04-30 18:40:37 +0100 |
| commit | 60518e6473248b16db9125504da0351707c35d1a | |
| tree | 35242796fa056caf35a8e1ab3fc40c3caa968a6f | |
| parent | f546e0d3d0316aa76a45de1f548591bde7308c41 | |
aarch64: Use RTL builtins for FP ml[as]_n intrinsics
Rewrite floating-point vml[as][q]_n Neon intrinsics to use RTL
builtins rather than inline assembly code, allowing for better
scheduling and optimization.
gcc/ChangeLog:
2021-01-18 Jonathan Wright <jonathan.wright@arm.com>
* config/aarch64/aarch64-simd-builtins.def: Add
float_ml[as]_n builtin generator macros.
* config/aarch64/aarch64-simd.md (*aarch64_mul3_elt_from_dup<mode>):
Rename to...
(mul_n<mode>3): This, and re-order arguments.
(aarch64_float_mla_n<mode>): Define.
(aarch64_float_mls_n<mode>): Define.
* config/aarch64/arm_neon.h (vmla_n_f32): Use RTL builtin
instead of inline asm.
(vmlaq_n_f32): Likewise.
(vmls_n_f32): Likewise.
(vmlsq_n_f32): Likewise.
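
For context, the vml[as][q]_n_f32 intrinsics perform a lane-broadcast multiply-accumulate: each lane of one vector is multiplied by a scalar and the product is added to (or subtracted from) an accumulator vector. A minimal sketch of those semantics in terms of other standard Neon intrinsics (illustrative only, not how arm_neon.h implements them after this patch):

```c
#include <arm_neon.h>

/* d[i] = a[i] + b[i] * c for each of the two lanes; this is what
   vmla_n_f32 computes.  vmlaq_n_f32 is the 4-lane (128-bit) variant,
   and vmls_n_f32 / vmlsq_n_f32 subtract the product instead.  */
float32x2_t
mla_n_reference (float32x2_t a, float32x2_t b, float32_t c)
{
  return vadd_f32 (a, vmul_n_f32 (b, c));
}
```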
| -rw-r--r-- | gcc/config/aarch64/aarch64-simd-builtins.def | 3 |
| -rw-r--r-- | gcc/config/aarch64/aarch64-simd.md | 46 |
| -rw-r--r-- | gcc/config/aarch64/arm_neon.h | 32 |

3 files changed, 47 insertions(+), 34 deletions(-)
```diff
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 5d4c01f..3b5e884 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -668,6 +668,9 @@
   BUILTIN_VHSDF (TERNOP, fnma, 4, FP)
   VAR1 (TERNOP, fnma, 4, FP, hf)
 
+  BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
+  BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
+
   /* Implemented by aarch64_simd_bsl<mode>.  */
   BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
   VAR2 (BSL_P, simd_bsl,0, NONE, di, v2di)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 65e6390..6edfd2d 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -750,14 +750,14 @@
   [(set_attr "type" "neon<fp>_mul_<Vetype>_scalar<q>")]
 )
 
-(define_insn "*aarch64_mul3_elt_from_dup<mode>"
+(define_insn "mul_n<mode>3"
  [(set (match_operand:VMUL 0 "register_operand" "=w")
-       (mult:VMUL
-	  (vec_duplicate:VMUL
-	    (match_operand:<VEL> 1 "register_operand" "<h_con>"))
-	  (match_operand:VMUL 2 "register_operand" "w")))]
+	(mult:VMUL
+	  (vec_duplicate:VMUL
+	    (match_operand:<VEL> 2 "register_operand" "<h_con>"))
+	  (match_operand:VMUL 1 "register_operand" "w")))]
  "TARGET_SIMD"
-  "<f>mul\t%0.<Vtype>, %2.<Vtype>, %1.<Vetype>[0]";
+  "<f>mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[0]";
  [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
 )
 
@@ -2636,6 +2636,40 @@
   [(set_attr "type" "neon_fp_abs_<stype><q>")]
 )
 
+(define_expand "aarch64_float_mla_n<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(plus:VDQSF
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (match_operand:<VEL> 3 "register_operand"))
+	    (match_operand:VDQSF 2 "register_operand"))
+	  (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_n<mode>3 (scratch, operands[2], operands[3]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls_n<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(minus:VDQSF
+	  (match_operand:VDQSF 1 "register_operand")
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (match_operand:<VEL> 3 "register_operand"))
+	    (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_n<mode>3 (scratch, operands[2], operands[3]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
 	(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index bde2d17..f1e1e0e 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7035,13 +7035,7 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
 {
-  float32x2_t __result;
-  float32x2_t __t1;
-  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mla_nv2sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x4_t
@@ -7388,13 +7382,7 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
 {
-  float32x4_t __result;
-  float32x4_t __t1;
-  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mla_nv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x8_t
@@ -7481,13 +7469,7 @@ __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
 {
-  float32x2_t __result;
-  float32x2_t __t1;
-  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mls_nv2sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x4_t
@@ -7838,13 +7820,7 @@ __extension__ extern __inline float32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
 {
-  float32x4_t __result;
-  float32x4_t __t1;
-  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
-           : "=w"(__result), "=w"(__t1)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_float_mls_nv4sf (__a, __b, __c);
 }
 
 __extension__ extern __inline int16x8_t
```