author     Levy Hsu <admin@levyhsu.com>    2024-09-02 13:52:38 +0800
committer  Levy Hsu <admin@levyhsu.com>    2024-09-05 02:34:43 +0000
commit     f9ca3fd1fe30f3ee6725bfe4a612e9a1234c11ac
tree       1c52f7d57426fede1d322b97f7785ba66fca455d /gcc
parent     91421e21e8f0f05f440174b8de7a43a311700e08
i386: Support partial vectorized FMA for V2BF/V4BF
This patch introduces support for vectorized FMA operations on bf16 types in
V2BF and V4BF modes on the i386 architecture. A new mode iterator and
define_expand entries for the fma, fnma, fms, and fnms operations are added in
mmx.md, so the i386 back end can expand these operations for partial BFmode vectors.
gcc/ChangeLog:
* config/i386/mmx.md (VBF_32_64): New mode iterator for V2BF/V4BF.
(fma<mode>4): New define_expand for V2BF/V4BF fma.
(fnma<mode>4): New define_expand for V2BF/V4BF fnma.
(fms<mode>4): New define_expand for V2BF/V4BF fms.
(fnms<mode>4): New define_expand for V2BF/V4BF fnms.
gcc/testsuite/ChangeLog:
* gcc.target/i386/avx10_2-partial-bf-vector-fma-1.c: New test.
Diffstat (limited to 'gcc')
 gcc/config/i386/mmx.md                                          | 80
 gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-fma-1.c | 57
2 files changed, 137 insertions, 0 deletions
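The mmx.md hunk below begins after the iterator definitions, so the VBF_32_64 mode iterator named in the ChangeLog is not visible in it. As a hedged sketch only (the exact enabling condition on the 64-bit mode is an assumption, modelled on the neighbouring partial-vector iterators in mmx.md), it would look roughly like this, together with the algebraic forms the four expanders implement:

;; Sketch, not part of the quoted hunk: the partial-vector BFmodes.
;; The TARGET_MMX_WITH_SSE gate on the 64-bit V4BF mode is assumed here.
(define_mode_iterator VBF_32_64
  [V2BF (V4BF "TARGET_MMX_WITH_SSE")])

;; The four expanders in the hunk cover the usual FMA variants:
;;   fma<mode>4  :   a * b + c
;;   fms<mode>4  :   a * b - c
;;   fnma<mode>4 :  -a * b + c
;;   fnms<mode>4 :  -a * b - c
;; Each forces its V2BF/V4BF operands into registers, takes V8BF lowpart
;; subregs, emits the existing V8BF pattern, and moves the V2BF/V4BF
;; lowpart of the result back into operand 0.

In other words, the expanders perform the arithmetic in V8BF and only the low V2BF/V4BF part of the result is kept.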
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index 0cfa9bd..4bc191b 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -2692,6 +2692,86 @@
   DONE;
 })
 
+(define_expand "fma<mode>4"
+  [(set (match_operand:VBF_32_64 0 "register_operand")
+	(fma:VBF_32_64
+	  (match_operand:VBF_32_64 1 "nonimmediate_operand")
+	  (match_operand:VBF_32_64 2 "nonimmediate_operand")
+	  (match_operand:VBF_32_64 3 "nonimmediate_operand")))]
+  "TARGET_AVX10_2_256"
+{
+  rtx op0 = gen_reg_rtx (V8BFmode);
+  rtx op1 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[1]), <MODE>mode);
+  rtx op2 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[2]), <MODE>mode);
+  rtx op3 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[3]), <MODE>mode);
+
+  emit_insn (gen_fmav8bf4 (op0, op1, op2, op3));
+
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8BFmode));
+  DONE;
+})
+
+(define_expand "fms<mode>4"
+  [(set (match_operand:VBF_32_64 0 "register_operand")
+	(fma:VBF_32_64
+	  (match_operand:VBF_32_64 1 "nonimmediate_operand")
+	  (match_operand:VBF_32_64 2 "nonimmediate_operand")
+	  (neg:VBF_32_64
+	    (match_operand:VBF_32_64 3 "nonimmediate_operand"))))]
+  "TARGET_AVX10_2_256"
+{
+  rtx op0 = gen_reg_rtx (V8BFmode);
+  rtx op1 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[1]), <MODE>mode);
+  rtx op2 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[2]), <MODE>mode);
+  rtx op3 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[3]), <MODE>mode);
+
+  emit_insn (gen_fmsv8bf4 (op0, op1, op2, op3));
+
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8BFmode));
+  DONE;
+})
+
+(define_expand "fnma<mode>4"
+  [(set (match_operand:VBF_32_64 0 "register_operand")
+	(fma:VBF_32_64
+	  (neg:VBF_32_64
+	    (match_operand:VBF_32_64 1 "nonimmediate_operand"))
+	  (match_operand:VBF_32_64 2 "nonimmediate_operand")
+	  (match_operand:VBF_32_64 3 "nonimmediate_operand")))]
+  "TARGET_AVX10_2_256"
+{
+  rtx op0 = gen_reg_rtx (V8BFmode);
+  rtx op1 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[1]), <MODE>mode);
+  rtx op2 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[2]), <MODE>mode);
+  rtx op3 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[3]), <MODE>mode);
+
+  emit_insn (gen_fnmav8bf4 (op0, op1, op2, op3));
+
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8BFmode));
+  DONE;
+})
+
+(define_expand "fnms<mode>4"
+  [(set (match_operand:VBF_32_64 0 "register_operand")
+	(fma:VBF_32_64
+	  (neg:VBF_32_64
+	    (match_operand:VBF_32_64 1 "nonimmediate_operand"))
+	  (match_operand:VBF_32_64 2 "nonimmediate_operand")
+	  (neg:VBF_32_64
+	    (match_operand:VBF_32_64 3 "nonimmediate_operand"))))]
+  "TARGET_AVX10_2_256"
+{
+  rtx op0 = gen_reg_rtx (V8BFmode);
+  rtx op1 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[1]), <MODE>mode);
+  rtx op2 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[2]), <MODE>mode);
+  rtx op3 = lowpart_subreg (V8BFmode, force_reg (<MODE>mode, operands[3]), <MODE>mode);
+
+  emit_insn (gen_fnmsv8bf4 (op0, op1, op2, op3));
+
+  emit_move_insn (operands[0], lowpart_subreg (<MODE>mode, op0, V8BFmode));
+  DONE;
+})
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ;;
 ;; Parallel half-precision floating point complex type operations
diff --git a/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-fma-1.c b/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-fma-1.c
new file mode 100644
index 0000000..72e17e9
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx10_2-partial-bf-vector-fma-1.c
@@ -0,0 +1,57 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx10.2 -O2" } */
+/* { dg-final { scan-assembler-times "vfmadd132nepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vfmsub132nepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vfnmadd132nepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+/* { dg-final { scan-assembler-times "vfnmsub132nepbf16\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+\[^\n\r]*%xmm\[0-9\]+(?:\n|\[ \\t\]+#)" 2 } } */
+
+typedef __bf16 v4bf __attribute__ ((__vector_size__ (8)));
+typedef __bf16 v2bf __attribute__ ((__vector_size__ (4)));
+
+v4bf
+foo_madd_64 (v4bf a, v4bf b, v4bf c)
+{
+  return a * b + c;
+}
+
+v4bf
+foo_msub_64 (v4bf a, v4bf b, v4bf c)
+{
+  return a * b - c;
+}
+
+v4bf
+foo_nmadd_64 (v4bf a, v4bf b, v4bf c)
+{
+  return -a * b + c;
+}
+
+v4bf
+foo_nmsub_64 (v4bf a, v4bf b, v4bf c)
+{
+  return -a * b - c;
+}
+
+v2bf
+foo_madd_32 (v2bf a, v2bf b, v2bf c)
+{
+  return a * b + c;
+}
+
+v2bf
+foo_msub_32 (v2bf a, v2bf b, v2bf c)
+{
+  return a * b - c;
+}
+
+v2bf
+foo_nmadd_32 (v2bf a, v2bf b, v2bf c)
+{
+  return -a * b + c;
+}
+
+v2bf
+foo_nmsub_32 (v2bf a, v2bf b, v2bf c)
+{
+  return -a * b - c;
+}
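As a usage note, compiling the new test outside the DejaGnu harness with the options from its dg-options line, for example "gcc -S -O2 -mavx10.2 avx10_2-partial-bf-vector-fma-1.c", should show two occurrences of each of the vfmadd132nepbf16, vfmsub132nepbf16, vfnmadd132nepbf16 and vfnmsub132nepbf16 mnemonics, one from the 64-bit (v4bf) function and one from the 32-bit (v2bf) function of each form; that is exactly what the scan-assembler-times directives at the top of the test count.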