author     Jonathan Wright <jonathan.wright@arm.com>  2021-02-16 15:42:36 +0000
committer  Jonathan Wright <jonathan.wright@arm.com>  2021-04-30 18:40:54 +0100
commit     b0d9aac8992c1f8c3198d9528a9867c653623dfb (patch)
tree       a21188d7f21698def6fac111aafce95ccfcf3c57 /gcc
parent     60518e6473248b16db9125504da0351707c35d1a (diff)
aarch64: Use RTL builtins for FP ml[as] intrinsics
Rewrite the floating-point vml[as][q] Neon intrinsics to use RTL
builtins rather than relying on the GCC vector extensions. Using RTL
builtins allows control over the emission of fmla/fmls instructions
(which we don't want here). With this commit, the code generated by
these intrinsics changes from a fused multiply-add/subtract instruction
to an fmul followed by an fadd/fsub instruction. If the programmer
really wants fmla/fmls instructions, they can use the vfm[as]
intrinsics.

gcc/ChangeLog:

2021-02-16  Jonathan Wright  <jonathan.wright@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add float_ml[as]
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_float_mla<mode>):
	Define.
	(aarch64_float_mls<mode>): Define.
	* config/aarch64/arm_neon.h (vmla_f32): Use RTL builtin instead
	of relying on GCC vector extensions.
	(vmla_f64): Likewise.
	(vmlaq_f32): Likewise.
	(vmlaq_f64): Likewise.
	(vmls_f32): Likewise.
	(vmls_f64): Likewise.
	(vmlsq_f32): Likewise.
	(vmlsq_f64): Likewise.
	* config/aarch64/iterators.md: Define VDQF_DF mode iterator.
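[Editorial illustration, not part of the patch: a minimal C sketch of the
behaviour the message describes, using the standard arm_neon.h intrinsics.]

#include <arm_neon.h>

/* vmla_f32 computes a + b * c.  After this commit GCC expands it through
   the new aarch64_float_mla builtin, so the body is emitted as a separate
   fmul followed by fadd rather than a fused fmla.  */
float32x2_t
use_vmla (float32x2_t a, float32x2_t b, float32x2_t c)
{
  return vmla_f32 (a, b, c);
}

/* Programmers who do want the fused instruction can use the vfm[as]
   intrinsics instead; vfma_f32 still maps to fmla.  */
float32x2_t
use_vfma (float32x2_t a, float32x2_t b, float32x2_t c)
{
  return vfma_f32 (a, b, c);
}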
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/aarch64/aarch64-simd-builtins.def |  2
-rw-r--r--  gcc/config/aarch64/aarch64-simd.md           | 32
-rw-r--r--  gcc/config/aarch64/arm_neon.h                | 16
-rw-r--r--  gcc/config/aarch64/iterators.md              |  1
4 files changed, 43 insertions(+), 8 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 3b5e884..2a2fc20 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -668,6 +668,8 @@
BUILTIN_VHSDF (TERNOP, fnma, 4, FP)
VAR1 (TERNOP, fnma, 4, FP, hf)
+ BUILTIN_VDQF_DF (TERNOP, float_mla, 0, FP)
+ BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 6edfd2d..0f96cd0 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2636,6 +2636,38 @@
[(set_attr "type" "neon_fp_abs_<stype><q>")]
)
+(define_expand "aarch64_float_mla<mode>"
+ [(set (match_operand:VDQF_DF 0 "register_operand")
+ (plus:VDQF_DF
+ (mult:VDQF_DF
+ (match_operand:VDQF_DF 2 "register_operand")
+ (match_operand:VDQF_DF 3 "register_operand"))
+ (match_operand:VDQF_DF 1 "register_operand")))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul<mode>3 (scratch, operands[2], operands[3]));
+ emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
+(define_expand "aarch64_float_mls<mode>"
+ [(set (match_operand:VDQF_DF 0 "register_operand")
+ (minus:VDQF_DF
+ (match_operand:VDQF_DF 1 "register_operand")
+ (mult:VDQF_DF
+ (match_operand:VDQF_DF 2 "register_operand")
+ (match_operand:VDQF_DF 3 "register_operand"))))]
+ "TARGET_SIMD"
+ {
+ rtx scratch = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_mul<mode>3 (scratch, operands[2], operands[3]));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+ DONE;
+ }
+)
+
(define_expand "aarch64_float_mla_n<mode>"
[(set (match_operand:VDQSF 0 "register_operand")
(plus:VDQSF
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index f1e1e0e..0227cad 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20347,28 +20347,28 @@ __extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
- return __a + __b * __c;
+ return __builtin_aarch64_float_mlav2sf (__a, __b, __c);
}
__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmla_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
{
- return __a + __b * __c;
+ return (float64x1_t) {__builtin_aarch64_float_mladf (__a[0], __b[0], __c[0])};
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
- return __a + __b * __c;
+ return __builtin_aarch64_float_mlav4sf (__a, __b, __c);
}
__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlaq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
{
- return __a + __b * __c;
+ return __builtin_aarch64_float_mlav2df (__a, __b, __c);
}
/* vmla_lane */
@@ -20545,28 +20545,28 @@ __extension__ extern __inline float32x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
{
- return __a - __b * __c;
+ return __builtin_aarch64_float_mlsv2sf (__a, __b, __c);
}
__extension__ extern __inline float64x1_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmls_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
{
- return __a - __b * __c;
+ return (float64x1_t) {__builtin_aarch64_float_mlsdf (__a[0], __b[0], __c[0])};
}
__extension__ extern __inline float32x4_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
{
- return __a - __b * __c;
+ return __builtin_aarch64_float_mlsv4sf (__a, __b, __c);
}
__extension__ extern __inline float64x2_t
__attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
vmlsq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
{
- return __a - __b * __c;
+ return __builtin_aarch64_float_mlsv2df (__a, __b, __c);
}
/* vmls_lane */
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 634c44e..c57aa6b 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -149,6 +149,7 @@
V2SF V4SF V2DF])
;; Advanced SIMD Float modes, and DF.
+(define_mode_iterator VDQF_DF [V2SF V4SF V2DF DF])
(define_mode_iterator VHSDF_DF [(V4HF "TARGET_SIMD_F16INST")
(V8HF "TARGET_SIMD_F16INST")
V2SF V4SF V2DF DF])
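[Editorial sketch, not from the commit: the cross-compiler name and flags
below are assumptions.  The codegen change can be observed by compiling a
small test and inspecting the assembly.]

/* test-mla.c: build with, e.g.,
     aarch64-linux-gnu-gcc -O2 -S test-mla.c
   Before this commit the function body compiled to a single fmla;
   with it, GCC emits fmul followed by fadd.  */
#include <arm_neon.h>

float32x4_t
mla_q (float32x4_t acc, float32x4_t x, float32x4_t y)
{
  return vmlaq_f32 (acc, x, y);
}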