Diffstat (limited to 'gcc')
 -rw-r--r--  gcc/config/aarch64/aarch64-simd-builtins.def |  2
 -rw-r--r--  gcc/config/aarch64/aarch64-simd.md           | 56
 -rw-r--r--  gcc/config/aarch64/arm_neon.h                |  8
 3 files changed, 62 insertions(+), 4 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 8e4b4ed..1e81bb5 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -674,6 +674,8 @@
   BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
   BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
   BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mla_laneq, 0, FP)
+  BUILTIN_VDQSF (QUADOP_LANE, float_mls_laneq, 0, FP)
 
   /* Implemented by aarch64_simd_bsl<mode>.  */
   BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index bdee49f..2347629 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -734,6 +734,22 @@
   [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
 )
 
+(define_insn "mul_laneq<mode>3"
+  [(set (match_operand:VDQSF 0 "register_operand" "=w")
+	(mult:VDQSF
+	  (vec_duplicate:VDQSF
+	    (vec_select:<VEL>
+	      (match_operand:V4SF 2 "register_operand" "w")
+	      (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
+	  (match_operand:VDQSF 1 "register_operand" "w")))]
+  "TARGET_SIMD"
+  {
+    operands[3] = aarch64_endian_lane_rtx (V4SFmode, INTVAL (operands[3]));
+    return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
+  }
+  [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
+)
+
 (define_insn "*aarch64_mul3_elt_<vswap_width_name><mode>"
   [(set (match_operand:VMUL_CHANGE_NLANES 0 "register_operand" "=w")
 	(mult:VMUL_CHANGE_NLANES
@@ -2742,6 +2758,46 @@
   }
 )
 
+(define_expand "aarch64_float_mla_laneq<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(plus:VDQSF
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V4SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))
+	  (match_operand:VDQSF 1 "register_operand")))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+				     operands[3], operands[4]));
+    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
+(define_expand "aarch64_float_mls_laneq<mode>"
+  [(set (match_operand:VDQSF 0 "register_operand")
+	(minus:VDQSF
+	  (match_operand:VDQSF 1 "register_operand")
+	  (mult:VDQSF
+	    (vec_duplicate:VDQSF
+	      (vec_select:<VEL>
+		(match_operand:V4SF 3 "register_operand")
+		(parallel [(match_operand:SI 4 "immediate_operand")])))
+	    (match_operand:VDQSF 2 "register_operand"))))]
+  "TARGET_SIMD"
+  {
+    rtx scratch = gen_reg_rtx (<MODE>mode);
+    emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
+				     operands[3], operands[4]));
+    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
+    DONE;
+  }
+)
+
 (define_insn "fma<mode>4"
   [(set (match_operand:VHSDF 0 "register_operand" "=w")
 	(fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 5328d44..17e059e 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -20420,7 +20420,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmla_laneq_f32 (float32x2_t __a, float32x2_t __b, float32x4_t __c,
 		const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_laneqv2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20504,7 +20504,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c,
 		 const int __lane)
 {
-  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mla_laneqv4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
@@ -20618,7 +20618,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmls_laneq_f32 (float32x2_t __a, float32x2_t __b, float32x4_t __c,
 		const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_laneqv2sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x4_t
@@ -20702,7 +20702,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c,
 		 const int __lane)
 {
-  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
+  return __builtin_aarch64_float_mls_laneqv4sf (__a, __b, __c, __lane);
 }
 
 __extension__ extern __inline int16x8_t
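
For context, a minimal usage sketch (illustrative only, not part of the patch): after this change the vml[as][q]_laneq_f32 intrinsics lower through the new aarch64_float_ml[as]_laneq<mode> expanders, which emit a lane-indexed fmul followed by a vector fadd/fsub, instead of relying on open-coded GCC vector-extension arithmetic. The function name and lane choice below are hypothetical.

#include <arm_neon.h>

/* Illustrative only: accumulate x scaled by lane 3 of coeffs into acc.
   Semantically acc + (x * coeffs[3]), matching the intrinsic's previous
   open-coded definition; with this patch it expands through
   __builtin_aarch64_float_mla_laneqv4sf, i.e. a lane-indexed
   fmul (v.4s, v.4s, v.s[3]) followed by a vector fadd.  The lane index
   must be a compile-time constant in [0, 3] for a float32x4_t.  */
float32x4_t
mla_by_lane3 (float32x4_t acc, float32x4_t x, float32x4_t coeffs)
{
  return vmlaq_laneq_f32 (acc, x, coeffs, 3);
}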
