author     Jin Ma <jinma@linux.alibaba.com>    2024-08-17 10:18:03 -0600
committer  Jeff Law <jlaw@ventanamicro.com>    2024-08-17 10:18:03 -0600
commit     6d734ba485547329599f12bea63842a4fba8d72c (patch)
tree       178db0e0287c5085cd9518e0c57fdf747babb0b2 /gcc
parent     7aed8dedeb9613925930447bf2457c3fd9972d91 (diff)
RISC-V: Fix ICE for vector single-width integer multiply-add intrinsics
When rs1 is the immediate 0, the following ICE occurs:
error: unrecognizable insn:
(insn 8 5 12 2 (set (reg:RVVM1DI 134 [ <retval> ])
        (if_then_else:RVVM1DI (unspec:RVVMF64BI [
                    (const_vector:RVVMF64BI repeat [
                            (const_int 1 [0x1])
                        ])
                    (reg/v:DI 137 [ vl ])
                    (const_int 2 [0x2]) repeated x2
                    (const_int 0 [0])
                    (reg:SI 66 vl)
                    (reg:SI 67 vtype)
                ] UNSPEC_VPREDICATE)
            (plus:RVVM1DI (mult:RVVM1DI (vec_duplicate:RVVM1DI (const_int 0 [0]))
                    (reg/v:RVVM1DI 136 [ vs2 ]))
                (reg/v:RVVM1DI 135 [ vd ]))
            (reg/v:RVVM1DI 135 [ vd ])))
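A minimal reproducer, in the same shape as the new tests added below (the function name here is only illustrative): passing the literal 0 as the scalar rs1 argument of one of the single-width integer multiply-add intrinsics.

    #include <riscv_vector.h>

    /* rs1 is the constant 0; before this fix operand 2 of the vimuladd
       patterns only accepted a register, so the insn above was
       unrecognizable.  */
    vint64m1_t
    trigger_ice (vint64m1_t vd, vint64m1_t vs2, size_t vl)
    {
      return __riscv_vmacc_vx_i64m1 (vd, 0, vs2, vl);
    }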
gcc/ChangeLog:
* config/riscv/vector.md: Allow scalar operand to be 0.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/base/bug-7.c: New test.
* gcc.target/riscv/rvv/base/bug-8.c: New test.
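In short, the affected vimuladd patterns (see the diff below) change operand 2 from register_operand with the "r" constraint to reg_or_0_operand with "rJ" (the "J" constraint matches integer zero), and the output templates use the %z operand modifier so a constant-0 rs1 is accepted and printed as the zero register. A representative fragment, taken from the vmacc pattern:

    (match_operand:<VEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ")
    ...
    "vmacc.vx\t%0,%z2,%3%p1"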
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/riscv/vector.md                       | 80
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c  | 26
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c  | 26
3 files changed, 92 insertions(+), 40 deletions(-)
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index aad34b3..211bbc0 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -5331,16 +5331,16 @@
          (plus:V_VLSI
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" " r, r, r, r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ"))
              (match_operand:V_VLSI 3 "register_operand" " 0, vr, 0, vr"))
            (match_operand:V_VLSI 4 "register_operand" " vr, vr, vr, vr"))
          (match_dup 3)))]
   "TARGET_VECTOR"
   "@
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%z2,%4%p1
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
@@ -5363,16 +5363,16 @@
          (plus:V_VLSI
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" " r, r, r, r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ"))
              (match_operand:V_VLSI 3 "register_operand" " vr, vr, vr, vr"))
            (match_operand:V_VLSI 4 "register_operand" " 0, vr, 0, vr"))
          (match_dup 4)))]
   "TARGET_VECTOR"
   "@
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
@@ -5431,16 +5431,16 @@
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ")))
              (match_operand:V_VLSI_D 3 "register_operand" " 0, vr, 0, vr"))
            (match_operand:V_VLSI_D 4 "register_operand" " vr, vr, vr, vr"))
          (match_dup 3)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m2r.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m2r.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1"
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m2r.v\t%0,%z2\;vmadd.vx\t%0,%z2,%4%p1
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m2r.v\t%0,%z2\;vmadd.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
@@ -5464,16 +5464,16 @@
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ")))
              (match_operand:V_VLSI_D 3 "register_operand" " vr, vr, vr, vr"))
            (match_operand:V_VLSI_D 4 "register_operand" " 0, vr, 0, vr"))
          (match_dup 4)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
@@ -5630,15 +5630,15 @@
            (match_operand:V_VLSI 4 "register_operand" " vr, vr, vr, vr")
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" " r, r, r, r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ"))
              (match_operand:V_VLSI 3 "register_operand" " 0, vr, 0, vr")))
          (match_dup 3)))]
   "TARGET_VECTOR"
   "@
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
@@ -5662,15 +5662,15 @@
            (match_operand:V_VLSI 4 "register_operand" " 0, vr, 0, vr")
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" " r, r, r, r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ"))
              (match_operand:V_VLSI 3 "register_operand" " vr, vr, vr, vr")))
          (match_dup 4)))]
   "TARGET_VECTOR"
   "@
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
@@ -5730,15 +5730,15 @@
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ")))
              (match_operand:V_VLSI_D 3 "register_operand" " 0, vr, 0, vr")))
          (match_dup 3)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
@@ -5763,15 +5763,15 @@
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ, rJ, rJ, rJ")))
              (match_operand:V_VLSI_D 3 "register_operand" " vr, vr, vr, vr")))
          (match_dup 4)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c
new file mode 100644
index 0000000..28766ce
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c
@@ -0,0 +1,26 @@
+/* Test that we do not have ice when compile */
+/* { dg-do assemble } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O2" { target { rv64 } } } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O2" { target { rv32 } } } */
+
+#include <riscv_vector.h>
+
+vint64m1_t f1 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vmacc_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f2 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsac_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m8_t f3 (vint64m8_t vd, vint64m8_t vs2, size_t vl)
+{
+  return __riscv_vmadd_vx_i64m8 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f4 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsub_vx_i64m1 (vd, 0, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c
new file mode 100644
index 0000000..975f755
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c
@@ -0,0 +1,26 @@
+/* Test that we do not have ice when compile */
+/* { dg-do assemble } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O0" { target { rv64 } } } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O0" { target { rv32 } } } */
+
+#include <riscv_vector.h>
+
+vint64m1_t f1 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vmacc_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f2 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsac_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m8_t f3 (vint64m8_t vd, vint64m8_t vs2, size_t vl)
+{
+  return __riscv_vmadd_vx_i64m8 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f4 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsub_vx_i64m1 (vd, 0, vs2, vl);
+}