//===- RISCVInstrInfoVVLPatterns.td - RVV VL patterns ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file contains the required infrastructure and VL patterns to
/// support code generation for the standard 'V' (Vector) extension,
/// version 1.0.
///
/// This file is included from and depends upon RISCVInstrInfoVPseudos.td.
///
/// Note: the patterns for RVV intrinsics are found in
/// RISCVInstrInfoVPseudos.td.
///
//===----------------------------------------------------------------------===//

// Splats a 64-bit value that has been split into two i32 parts. This is
// expanded late to two scalar stores and a stride 0 vector load.
// The first operand is the passthru operand.
//
// This is only present to generate the correct TableGen SDNode description;
// it is lowered before instruction selection.
// FIXME: I'm not sure the types here are entirely correct.
// Returns a vector. Operand 0 is the passthru, operands 1 and 2 are i32
// scalars, and operand 3 is VL.
def riscv_splat_vector_split_i64_vl
    : RVSDNode<"SPLAT_VECTOR_SPLIT_I64_VL",
               SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCVecEltisVT<0, i64>,
                                    SDTCisSameAs<1, 0>, SDTCisVT<2, i32>,
                                    SDTCisVT<3, i32>, SDTCisVT<4, XLenVT>]>>;

// RISC-V vector tuple type version of INSERT_SUBVECTOR/EXTRACT_SUBVECTOR.
def riscv_tuple_insert
    : RVSDNode<"TUPLE_INSERT",
               SDTypeProfile<1, 3, [SDTCisSameAs<1, 0>, SDTCisVec<2>,
                                    SDTCisVT<3, i32>]>>;
def riscv_tuple_extract
    : RVSDNode<"TUPLE_EXTRACT",
               SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<2, i32>]>>;

//===----------------------------------------------------------------------===//
// Helpers to define the VL patterns.
//===----------------------------------------------------------------------===//

def SDT_RISCVIntUnOp_VL
    : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
                           SDTCisVec<0>, SDTCisInt<0>, SDTCVecEltisVT<3, i1>,
                           SDTCisSameNumEltsAs<0, 3>, SDTCisVT<4, XLenVT>]>;

def SDT_RISCVIntBinOp_VL
    : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
                           SDTCisVec<0>, SDTCisInt<0>, SDTCisSameAs<0, 3>,
                           SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
                           SDTCisVT<5, XLenVT>]>;

// Input: (vector, vector/scalar, passthru, mask, roundmode, vl)
def SDT_RISCVVNBinOp_RM_VL
    : SDTypeProfile<1, 6, [SDTCisVec<0>, SDTCisInt<0>, SDTCisSameAs<0, 3>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVec<1>,
                           SDTCisOpSmallerThanOp<2, 1>, SDTCisSameAs<0, 2>,
                           SDTCisSameNumEltsAs<0, 4>, SDTCVecEltisVT<4, i1>,
                           SDTCisVT<5, XLenVT>, SDTCisVT<6, XLenVT>]>;

def SDT_RISCVFPUnOp_VL
    : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisVec<0>, SDTCisFP<0>,
                           SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<0, 2>,
                           SDTCisVT<3, XLenVT>]>;

def SDT_RISCVFPBinOp_VL
    : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
                           SDTCisVec<0>, SDTCisFP<0>, SDTCisSameAs<0, 3>,
                           SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
                           SDTCisVT<5, XLenVT>]>;

def SDT_RISCVCopySign_VL
    : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
                           SDTCisVec<0>, SDTCisFP<0>, SDTCisSameAs<0, 3>,
                           SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
                           SDTCisVT<5, XLenVT>]>;

// VMV_V_V_VL matches the semantics of vmv.v.v but includes an extra operand
// for the VL value to be used for the operation. The first operand is the
// passthru operand.
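//
// An illustrative DAG instance (not taken from this file), assuming nxv2i32
// operands:
//   t2: nxv2i32 = riscv_vmv_v_v_vl t0:passthru, t1:src, t3:vl
// copies the first VL elements of t1 into the result; elements past VL come
// from the passthru operand t0.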
def riscv_vmv_v_v_vl
    : RVSDNode<"VMV_V_V_VL",
               SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisVT<3, XLenVT>]>>;

// VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
// for the VL value to be used for the operation. The first operand is the
// passthru operand.
def riscv_vmv_v_x_vl
    : RVSDNode<"VMV_V_X_VL",
               SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisInt<0>,
                                    SDTCisSameAs<0, 1>, SDTCisVT<2, XLenVT>,
                                    SDTCisVT<3, XLenVT>]>>;

// VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand
// for the VL value to be used for the operation. The first operand is the
// passthru operand.
def riscv_vfmv_v_f_vl
    : RVSDNode<"VFMV_V_F_VL",
               SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>, SDTCisEltOfVec<2, 0>,
                                    SDTCisVT<3, XLenVT>]>>;

// VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
def riscv_vmv_s_x_vl
    : RVSDNode<"VMV_S_X_VL",
               SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisInt<0>,
                                    SDTCisVT<2, XLenVT>,
                                    SDTCisVT<3, XLenVT>]>>;

// VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
def riscv_vfmv_s_f_vl
    : RVSDNode<"VFMV_S_F_VL",
               SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisFP<0>,
                                    SDTCisEltOfVec<2, 0>,
                                    SDTCisVT<3, XLenVT>]>>;

// Vector binary ops with a passthru as a third operand, a mask as a fourth
// operand, and VL as a fifth operand.
let HasPassthruOp = true, HasMaskOp = true in {
def riscv_add_vl   : RVSDNode<"ADD_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sub_vl   : RVSDNode<"SUB_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_mul_vl   : RVSDNode<"MUL_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhs_vl : RVSDNode<"MULHS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_mulhu_vl : RVSDNode<"MULHU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_and_vl   : RVSDNode<"AND_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_or_vl    : RVSDNode<"OR_VL",    SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_xor_vl   : RVSDNode<"XOR_VL",   SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_sdiv_vl  : RVSDNode<"SDIV_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_srem_vl  : RVSDNode<"SREM_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_udiv_vl  : RVSDNode<"UDIV_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_urem_vl  : RVSDNode<"UREM_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_shl_vl   : RVSDNode<"SHL_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_sra_vl   : RVSDNode<"SRA_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_srl_vl   : RVSDNode<"SRL_VL",   SDT_RISCVIntBinOp_VL>;
def riscv_rotl_vl  : RVSDNode<"ROTL_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_rotr_vl  : RVSDNode<"ROTR_VL",  SDT_RISCVIntBinOp_VL>;
def riscv_smin_vl  : RVSDNode<"SMIN_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_smax_vl  : RVSDNode<"SMAX_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_umin_vl  : RVSDNode<"UMIN_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_umax_vl  : RVSDNode<"UMAX_VL",  SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;

def riscv_bitreverse_vl : RVSDNode<"BITREVERSE_VL", SDT_RISCVIntUnOp_VL>;
def riscv_bswap_vl      : RVSDNode<"BSWAP_VL",      SDT_RISCVIntUnOp_VL>;
def riscv_ctlz_vl       : RVSDNode<"CTLZ_VL",       SDT_RISCVIntUnOp_VL>;
def riscv_cttz_vl       : RVSDNode<"CTTZ_VL",       SDT_RISCVIntUnOp_VL>;
def riscv_ctpop_vl      : RVSDNode<"CTPOP_VL",      SDT_RISCVIntUnOp_VL>;

// Averaging adds of signed integers.
def riscv_avgfloors_vl : RVSDNode<"AVGFLOORS_VL", SDT_RISCVIntBinOp_VL,
                                  [SDNPCommutative]>;
// Averaging adds of unsigned integers.
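// That is, each lane computes floor((a + b) / 2) with the intermediate sum
// taken at full precision (matching ISD::AVGFLOORU); this is what vaaddu
// computes under the round-down (rdn) fixed-point rounding mode.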
def riscv_avgflooru_vl : RVSDNode<"AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; // Rounding averaging adds of signed integers. def riscv_avgceils_vl : RVSDNode<"AVGCEILS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; // Rounding averaging adds of unsigned integers. def riscv_avgceilu_vl : RVSDNode<"AVGCEILU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; def riscv_saddsat_vl : RVSDNode<"SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; def riscv_uaddsat_vl : RVSDNode<"UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>; def riscv_ssubsat_vl : RVSDNode<"SSUBSAT_VL", SDT_RISCVIntBinOp_VL>; def riscv_usubsat_vl : RVSDNode<"USUBSAT_VL", SDT_RISCVIntBinOp_VL>; def riscv_fadd_vl : RVSDNode<"FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; def riscv_fsub_vl : RVSDNode<"FSUB_VL", SDT_RISCVFPBinOp_VL>; def riscv_fmul_vl : RVSDNode<"FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; def riscv_fdiv_vl : RVSDNode<"FDIV_VL", SDT_RISCVFPBinOp_VL>; } // let HasPassthruOp = true, HasMaskOp = true // Vector unary ops with a mask as a second operand and VL as a third operand. let HasMaskOp = true in { def riscv_fneg_vl : RVSDNode<"FNEG_VL", SDT_RISCVFPUnOp_VL>; def riscv_fabs_vl : RVSDNode<"FABS_VL", SDT_RISCVFPUnOp_VL>; def riscv_fsqrt_vl : RVSDNode<"FSQRT_VL", SDT_RISCVFPUnOp_VL>; } // let HasMaskOp = true let HasPassthruOp = true, HasMaskOp = true in { def riscv_fcopysign_vl : RVSDNode<"FCOPYSIGN_VL", SDT_RISCVCopySign_VL>; def riscv_vfmin_vl : RVSDNode<"VFMIN_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; def riscv_vfmax_vl : RVSDNode<"VFMAX_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative]>; } // let HasPassthruOp = true, HasMaskOp = true let IsStrictFP = true, HasPassthruOp = true, HasMaskOp = true in { def riscv_strict_fadd_vl : RVSDNode<"STRICT_FADD_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; def riscv_strict_fsub_vl : RVSDNode<"STRICT_FSUB_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; def riscv_strict_fmul_vl : RVSDNode<"STRICT_FMUL_VL", SDT_RISCVFPBinOp_VL, [SDNPCommutative, SDNPHasChain]>; def riscv_strict_fdiv_vl : RVSDNode<"STRICT_FDIV_VL", SDT_RISCVFPBinOp_VL, [SDNPHasChain]>; } // let IsStrictFP = true, HasPassthruOp = true, HasMaskOp = true let IsStrictFP = true, HasMaskOp = true in def riscv_strict_fsqrt_vl : RVSDNode<"STRICT_FSQRT_VL", SDT_RISCVFPUnOp_VL, [SDNPHasChain]>; def any_riscv_fadd_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), [(riscv_fadd_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), (riscv_strict_fadd_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>; def any_riscv_fsub_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), [(riscv_fsub_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), (riscv_strict_fsub_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>; def any_riscv_fmul_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), [(riscv_fmul_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), (riscv_strict_fmul_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>; def any_riscv_fdiv_vl : PatFrags<(ops node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), [(riscv_fdiv_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl), (riscv_strict_fdiv_vl node:$lhs, node:$rhs, node:$passthru, node:$mask, node:$vl)]>; def any_riscv_fsqrt_vl : PatFrags<(ops node:$src, node:$mask, node:$vl), [(riscv_fsqrt_vl node:$src, node:$mask, node:$vl), (riscv_strict_fsqrt_vl node:$src, node:$mask, 
node:$vl)]>; let HasMaskOp = true in def riscv_fclass_vl : RVSDNode<"FCLASS_VL", SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisVec<0>, SDTCisFP<1>, SDTCisVec<1>, SDTCisSameSizeAs<0, 1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<0, 2>, SDTCisVT<3, XLenVT>]>>; def SDT_RISCVVecFMA_VL : SDTypeProfile<1, 5, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisVec<0>, SDTCisFP<0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>]>; let HasMaskOp = true in { // Vector FMA ops with a mask as a fourth operand and VL as a fifth operand. def riscv_vfmadd_vl : RVSDNode<"VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; def riscv_vfnmadd_vl : RVSDNode<"VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; def riscv_vfmsub_vl : RVSDNode<"VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; def riscv_vfnmsub_vl : RVSDNode<"VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative]>; } def SDT_RISCVWVecFMA_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCisVec<1>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisSameAs<0, 3>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>]>; let HasMaskOp = true in { // Vector widening FMA ops with a mask as a fourth operand and VL as a fifth // operand. def riscv_vfwmadd_vl : RVSDNode<"VFWMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; def riscv_vfwnmadd_vl : RVSDNode<"VFWNMADD_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; def riscv_vfwmsub_vl : RVSDNode<"VFWMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; def riscv_vfwnmsub_vl : RVSDNode<"VFWNMSUB_VL", SDT_RISCVWVecFMA_VL, [SDNPCommutative]>; let IsStrictFP = true in { def riscv_strict_vfmadd_vl : RVSDNode<"STRICT_VFMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; def riscv_strict_vfnmadd_vl : RVSDNode<"STRICT_VFNMADD_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; def riscv_strict_vfmsub_vl : RVSDNode<"STRICT_VFMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; def riscv_strict_vfnmsub_vl : RVSDNode<"STRICT_VFNMSUB_VL", SDT_RISCVVecFMA_VL, [SDNPCommutative, SDNPHasChain]>; } // let IsStrictFP = true } // let HasMaskOp = true def any_riscv_vfmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), [(riscv_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), (riscv_strict_vfmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; def any_riscv_vfnmadd_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), [(riscv_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), (riscv_strict_vfnmadd_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; def any_riscv_vfmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), [(riscv_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), (riscv_strict_vfmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; def any_riscv_vfnmsub_vl : PatFrags<(ops node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), [(riscv_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl), (riscv_strict_vfnmsub_vl node:$rs1, node:$rs2, node:$rs3, node:$mask, node:$vl)]>; def SDT_RISCVFPRoundOp_VL : SDTypeProfile<1, 3, [ SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT> ]>; def SDT_RISCVFPExtendOp_VL : SDTypeProfile<1, 3, [ SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameNumEltsAs<0, 1>, 
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;

let HasMaskOp = true in {
def riscv_fpround_vl  : RVSDNode<"FP_ROUND_VL",  SDT_RISCVFPRoundOp_VL>;
def riscv_fpextend_vl : RVSDNode<"FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL>;

// Matches the semantics of the vfncvt.rod instruction (Convert double-width
// float to single-width float, rounding towards odd). Takes a double-width
// float vector and produces a single-width float vector. Also has a mask and
// VL operand.
def riscv_fncvt_rod_vl : RVSDNode<"VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL>;

let IsStrictFP = true in {
def riscv_strict_fpround_vl
    : RVSDNode<"STRICT_FP_ROUND_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;
def riscv_strict_fpextend_vl
    : RVSDNode<"STRICT_FP_EXTEND_VL", SDT_RISCVFPExtendOp_VL, [SDNPHasChain]>;
def riscv_strict_fncvt_rod_vl
    : RVSDNode<"STRICT_VFNCVT_ROD_VL", SDT_RISCVFPRoundOp_VL, [SDNPHasChain]>;
} // let IsStrictFP = true
} // let HasMaskOp = true

def any_riscv_fpround_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl),
               [(riscv_fpround_vl node:$src, node:$mask, node:$vl),
                (riscv_strict_fpround_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_fpextend_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl),
               [(riscv_fpextend_vl node:$src, node:$mask, node:$vl),
                (riscv_strict_fpextend_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_fncvt_rod_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl),
               [(riscv_fncvt_rod_vl node:$src, node:$mask, node:$vl),
                (riscv_strict_fncvt_rod_vl node:$src, node:$mask, node:$vl)]>;

def SDT_RISCVFP2IOp_VL : SDTypeProfile<1, 3, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVFP2IOp_RM_VL : SDTypeProfile<1, 4, [
  SDTCisInt<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  SDTCisVT<4, XLenVT> // Rounding mode
]>;

def SDT_RISCVI2FPOp_VL : SDTypeProfile<1, 3, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>
]>;
def SDT_RISCVI2FPOp_RM_VL : SDTypeProfile<1, 4, [
  SDTCisFP<0>, SDTCisInt<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>,
  SDTCisVT<4, XLenVT> // Rounding mode
]>;

def SDT_RISCVSETCCOP_VL : SDTypeProfile<1, 6, [
  SDTCVecEltisVT<0, i1>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>,
  SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>, SDTCisSameAs<0, 4>,
  SDTCisSameAs<0, 5>, SDTCisVT<6, XLenVT>]>;

// Float -> Int
let HasMaskOp = true in {
def riscv_vfcvt_rm_xu_f_vl : RVSDNode<"VFCVT_RM_XU_F_VL", SDT_RISCVFP2IOp_RM_VL>;
def riscv_vfcvt_rm_x_f_vl  : RVSDNode<"VFCVT_RM_X_F_VL",  SDT_RISCVFP2IOp_RM_VL>;

def riscv_vfcvt_rtz_xu_f_vl : RVSDNode<"VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL>;
def riscv_vfcvt_rtz_x_f_vl  : RVSDNode<"VFCVT_RTZ_X_F_VL",  SDT_RISCVFP2IOp_VL>;

let IsStrictFP = true in {
def riscv_strict_vfcvt_rm_x_f_vl
    : RVSDNode<"STRICT_VFCVT_RM_X_F_VL", SDT_RISCVFP2IOp_RM_VL, [SDNPHasChain]>;
def riscv_strict_vfcvt_rtz_xu_f_vl
    : RVSDNode<"STRICT_VFCVT_RTZ_XU_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
def riscv_strict_vfcvt_rtz_x_f_vl
    : RVSDNode<"STRICT_VFCVT_RTZ_X_F_VL", SDT_RISCVFP2IOp_VL, [SDNPHasChain]>;
} // let IsStrictFP = true
} // let HasMaskOp = true

def any_riscv_vfcvt_rm_x_f_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl, node:$rm),
               [(riscv_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm),
                (riscv_strict_vfcvt_rm_x_f_vl node:$src, node:$mask, node:$vl, node:$rm)]>;
def
any_riscv_vfcvt_rtz_xu_f_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl),
               [(riscv_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl),
                (riscv_strict_vfcvt_rtz_xu_f_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_vfcvt_rtz_x_f_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl),
               [(riscv_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl),
                (riscv_strict_vfcvt_rtz_x_f_vl node:$src, node:$mask, node:$vl)]>;

// Int -> Float
let HasMaskOp = true in {
def riscv_sint_to_fp_vl : RVSDNode<"SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;
def riscv_uint_to_fp_vl : RVSDNode<"UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL>;

def riscv_vfcvt_rm_f_xu_vl : RVSDNode<"VFCVT_RM_F_XU_VL", SDT_RISCVI2FPOp_RM_VL>;
def riscv_vfcvt_rm_f_x_vl  : RVSDNode<"VFCVT_RM_F_X_VL",  SDT_RISCVI2FPOp_RM_VL>;

let IsStrictFP = true in {
def riscv_strict_sint_to_fp_vl
    : RVSDNode<"STRICT_SINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
def riscv_strict_uint_to_fp_vl
    : RVSDNode<"STRICT_UINT_TO_FP_VL", SDT_RISCVI2FPOp_VL, [SDNPHasChain]>;
} // let IsStrictFP = true
} // let HasMaskOp = true

def any_riscv_sint_to_fp_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl),
               [(riscv_sint_to_fp_vl node:$src, node:$mask, node:$vl),
                (riscv_strict_sint_to_fp_vl node:$src, node:$mask, node:$vl)]>;
def any_riscv_uint_to_fp_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl),
               [(riscv_uint_to_fp_vl node:$src, node:$mask, node:$vl),
                (riscv_strict_uint_to_fp_vl node:$src, node:$mask, node:$vl)]>;

let HasMaskOp = true in {
def riscv_vfround_noexcept_vl
    : RVSDNode<"VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL>;
let IsStrictFP = true in
def riscv_strict_vfround_noexcept_vl
    : RVSDNode<"STRICT_VFROUND_NOEXCEPT_VL", SDT_RISCVFPUnOp_VL,
               [SDNPHasChain]>;
}

def any_riscv_vfround_noexcept_vl
    : PatFrags<(ops node:$src, node:$mask, node:$vl),
               [(riscv_vfround_noexcept_vl node:$src, node:$mask, node:$vl),
                (riscv_strict_vfround_noexcept_vl node:$src, node:$mask, node:$vl)]>;

// Vector compare producing a mask. Fourth operand is input mask. Fifth
// operand is VL.
let HasPassthruOp = true, HasMaskOp = true in
def riscv_setcc_vl : RVSDNode<"SETCC_VL", SDT_RISCVSETCCOP_VL>;
let IsStrictFP = true, HasMaskOp = true in {
def riscv_strict_fsetcc_vl
    : RVSDNode<"STRICT_FSETCC_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
def riscv_strict_fsetccs_vl
    : RVSDNode<"STRICT_FSETCCS_VL", SDT_RISCVSETCCOP_VL, [SDNPHasChain]>;
} // let IsStrictFP = true, HasMaskOp = true

def any_riscv_fsetcc_vl
    : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$passthru,
                    node:$mask, node:$vl),
               [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru,
                                node:$mask, node:$vl),
                (riscv_strict_fsetcc_vl node:$lhs, node:$rhs, node:$cc,
                                        node:$passthru, node:$mask, node:$vl)]>;
def any_riscv_fsetccs_vl
    : PatFrags<(ops node:$lhs, node:$rhs, node:$cc, node:$passthru,
                    node:$mask, node:$vl),
               [(riscv_setcc_vl node:$lhs, node:$rhs, node:$cc, node:$passthru,
                                node:$mask, node:$vl),
                (riscv_strict_fsetccs_vl node:$lhs, node:$rhs, node:$cc,
                                         node:$passthru, node:$mask, node:$vl)]>;

let HasMaskOp = true in {
// Matches the semantics of vrgather.vx and vrgather.vv with extra operands
// for passthru and VL, except that out-of-bounds indices produce a poison
// result rather than zero. Operands are (src, index, passthru, mask, vl).
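// For example (illustrative only, not taken from this file), with nxv4i32
// operands
//   (riscv_vrgather_vv_vl src, (splat 1), passthru, mask, vl)
// broadcasts element 1 of src into every active lane; lanes past VL and
// masked-off lanes are governed by the passthru operand and the tail/mask
// policy.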
def riscv_vrgather_vx_vl : RVSDNode<"VRGATHER_VX_VL", SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisVT<2, XLenVT>, SDTCisSameAs<0, 3>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>]>>; def riscv_vrgather_vv_vl : RVSDNode<"VRGATHER_VV_VL", SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisSameNumEltsAs<0, 2>, SDTCisSameSizeAs<0, 2>, SDTCisSameAs<0, 3>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>]>>; def riscv_vrgatherei16_vv_vl : RVSDNode<"VRGATHEREI16_VV_VL", SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCVecEltisVT<2, i16>, SDTCisSameNumEltsAs<0, 2>, SDTCisSameAs<0, 3>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>]>>; } // let HasMaskOp = true def SDT_RISCVVMERGE_VL : SDTypeProfile<1, 5, [ SDTCisVec<0>, SDTCisVec<1>, SDTCisSameNumEltsAs<0, 1>, SDTCVecEltisVT<1, i1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>, SDTCisSameAs<0, 4>, SDTCisVT<5, XLenVT> ]>; // General vmerge node with mask, true, false, passthru, and vl operands. // Tail agnostic vselect can be implemented by setting passthru to undef. let HasPassthruOp = true in def riscv_vmerge_vl : RVSDNode<"VMERGE_VL", SDT_RISCVVMERGE_VL>; def SDT_RISCVVMSETCLR_VL : SDTypeProfile<1, 1, [SDTCVecEltisVT<0, i1>, SDTCisVT<1, XLenVT>]>; // Set mask vector to all zeros or ones. def riscv_vmclr_vl : RVSDNode<"VMCLR_VL", SDT_RISCVVMSETCLR_VL>; def riscv_vmset_vl : RVSDNode<"VMSET_VL", SDT_RISCVVMSETCLR_VL>; def SDT_RISCVMaskBinOp_VL : SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCVecEltisVT<0, i1>, SDTCisVT<3, XLenVT>]>; // Mask binary operators. def riscv_vmand_vl : RVSDNode<"VMAND_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; def riscv_vmor_vl : RVSDNode<"VMOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; def riscv_vmxor_vl : RVSDNode<"VMXOR_VL", SDT_RISCVMaskBinOp_VL, [SDNPCommutative]>; def true_mask : PatLeaf<(riscv_vmset_vl (XLenVT srcvalue))>; def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl), (riscv_vmxor_vl node:$rs, true_mask, node:$vl)>; let HasMaskOp = true in { // vcpop.m with additional mask and VL operands. def riscv_vcpop_vl : RVSDNode<"VCPOP_VL", SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, SDTCisVec<1>, SDTCisInt<1>, SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>]>>; // vfirst.m with additional mask and VL operands. def riscv_vfirst_vl : RVSDNode<"VFIRST_VL", SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>, SDTCisVec<1>, SDTCisInt<1>, SDTCVecEltisVT<2, i1>, SDTCisSameNumEltsAs<1, 2>, SDTCisVT<3, XLenVT>]>>; } // let HasMaskOp = true def SDT_RISCVVEXTEND_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, SDTCisSameNumEltsAs<1, 2>, SDTCVecEltisVT<2, i1>, SDTCisVT<3, XLenVT>]>; let HasMaskOp = true in { // Vector sign/zero extend with additional mask & VL operands. def riscv_sext_vl : RVSDNode<"VSEXT_VL", SDT_RISCVVEXTEND_VL>; def riscv_zext_vl : RVSDNode<"VZEXT_VL", SDT_RISCVVEXTEND_VL>; } // let HasMaskOp = true def riscv_ext_vl : PatFrags<(ops node:$A, node:$B, node:$C), [(riscv_sext_vl node:$A, node:$B, node:$C), (riscv_zext_vl node:$A, node:$B, node:$C)]>; def SDT_RISCVVTRUNCATE_VL : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameNumEltsAs<0, 1>, SDTCisSameNumEltsAs<0, 2>, SDTCVecEltisVT<2, i1>, SDTCisVT<3, XLenVT>]>; let HasMaskOp = true in { // Truncates a RVV integer vector by one power-of-two. Carries both an extra // mask and VL operand. 
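// "One power-of-two" means a single halving of SEW, e.g. nxv2i64 -> nxv2i32;
// larger truncates (say i64 -> i16) are emitted as a chain of these nodes.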
def riscv_trunc_vector_vl : RVSDNode<"TRUNCATE_VECTOR_VL",
                                     SDT_RISCVVTRUNCATE_VL>;

// Truncates a RVV integer vector by one power-of-two. If the value doesn't
// fit in the destination type, the result is saturated. These correspond to
// vnclip and vnclipu with a shift of 0. Carries both an extra mask and VL
// operand.
def riscv_trunc_vector_vl_ssat : RVSDNode<"TRUNCATE_VECTOR_VL_SSAT",
                                          SDT_RISCVVTRUNCATE_VL>;
def riscv_trunc_vector_vl_usat : RVSDNode<"TRUNCATE_VECTOR_VL_USAT",
                                          SDT_RISCVVTRUNCATE_VL>;
} // let HasMaskOp = true

def SDT_RISCVVWIntBinOp_VL
    : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisInt<1>,
                           SDTCisSameNumEltsAs<0, 1>,
                           SDTCisOpSmallerThanOp<1, 0>, SDTCisSameAs<1, 2>,
                           SDTCisSameAs<0, 3>, SDTCisSameNumEltsAs<1, 4>,
                           SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>;

let HasPassthruOp = true, HasMaskOp = true in {
// Widening instructions with a passthru value as a third operand, a mask as
// a fourth operand, and VL as a fifth operand.
def riscv_vwmul_vl   : RVSDNode<"VWMUL_VL",   SDT_RISCVVWIntBinOp_VL,
                                [SDNPCommutative]>;
def riscv_vwmulu_vl  : RVSDNode<"VWMULU_VL",  SDT_RISCVVWIntBinOp_VL,
                                [SDNPCommutative]>;
def riscv_vwmulsu_vl : RVSDNode<"VWMULSU_VL", SDT_RISCVVWIntBinOp_VL>;
def riscv_vwadd_vl   : RVSDNode<"VWADD_VL",   SDT_RISCVVWIntBinOp_VL,
                                [SDNPCommutative]>;
def riscv_vwaddu_vl  : RVSDNode<"VWADDU_VL",  SDT_RISCVVWIntBinOp_VL,
                                [SDNPCommutative]>;
def riscv_vwsub_vl   : RVSDNode<"VWSUB_VL",   SDT_RISCVVWIntBinOp_VL, []>;
def riscv_vwsubu_vl  : RVSDNode<"VWSUBU_VL",  SDT_RISCVVWIntBinOp_VL, []>;
def riscv_vwsll_vl   : RVSDNode<"VWSLL_VL",   SDT_RISCVVWIntBinOp_VL, []>;
} // let HasPassthruOp = true, HasMaskOp = true

def SDT_RISCVVWIntTernOp_VL
    : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisInt<1>,
                           SDTCisSameNumEltsAs<0, 1>,
                           SDTCisOpSmallerThanOp<1, 0>, SDTCisSameAs<1, 2>,
                           SDTCisSameAs<0, 3>, SDTCisSameNumEltsAs<1, 4>,
                           SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>;

let HasMaskOp = true in {
// Widening ternary operations with a mask as the fourth operand and VL as
// the fifth operand.
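// For example, vwmacc computes vd[i] += vs1[i] * vs2[i] with the product
// widened to 2*SEW, so the accumulator (third) operand already has the wide
// type.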
def riscv_vwmacc_vl : RVSDNode<"VWMACC_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; def riscv_vwmaccu_vl : RVSDNode<"VWMACCU_VL", SDT_RISCVVWIntTernOp_VL, [SDNPCommutative]>; def riscv_vwmaccsu_vl : RVSDNode<"VWMACCSU_VL", SDT_RISCVVWIntTernOp_VL, []>; } // let HasMaskOp = true def SDT_RISCVVWFPBinOp_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCisFP<1>, SDTCisSameNumEltsAs<0, 1>, SDTCisOpSmallerThanOp<1, 0>, SDTCisSameAs<1, 2>, SDTCisSameAs<0, 3>, SDTCisSameNumEltsAs<1, 4>, SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>; let HasPassthruOp = true, HasMaskOp = true in { def riscv_vfwmul_vl : RVSDNode<"VFWMUL_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; def riscv_vfwadd_vl : RVSDNode<"VFWADD_VL", SDT_RISCVVWFPBinOp_VL, [SDNPCommutative]>; def riscv_vfwsub_vl : RVSDNode<"VFWSUB_VL", SDT_RISCVVWFPBinOp_VL, []>; } // let HasPassthruOp = true, HasMaskOp = true def SDT_RISCVVWIntBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>, SDTCisSameNumEltsAs<1, 2>, SDTCisOpSmallerThanOp<2, 1>, SDTCisSameAs<0, 3>, SDTCisSameNumEltsAs<1, 4>, SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>; let HasPassthruOp = true, HasMaskOp = true in { def riscv_vwadd_w_vl : RVSDNode<"VWADD_W_VL", SDT_RISCVVWIntBinOpW_VL>; def riscv_vwaddu_w_vl : RVSDNode<"VWADDU_W_VL", SDT_RISCVVWIntBinOpW_VL>; def riscv_vwsub_w_vl : RVSDNode<"VWSUB_W_VL", SDT_RISCVVWIntBinOpW_VL>; def riscv_vwsubu_w_vl : RVSDNode<"VWSUBU_W_VL", SDT_RISCVVWIntBinOpW_VL>; } // let HasPassthruOp = true, HasMaskOp = true def SDT_RISCVVWFPBinOpW_VL : SDTypeProfile<1, 5, [SDTCisVec<0>, SDTCisFP<0>, SDTCisSameAs<0, 1>, SDTCisFP<2>, SDTCisSameNumEltsAs<1, 2>, SDTCisOpSmallerThanOp<2, 1>, SDTCisSameAs<0, 3>, SDTCisSameNumEltsAs<1, 4>, SDTCVecEltisVT<4, i1>, SDTCisVT<5, XLenVT>]>; let HasPassthruOp = true, HasMaskOp = true in { def riscv_vfwadd_w_vl : RVSDNode<"VFWADD_W_VL", SDT_RISCVVWFPBinOpW_VL>; def riscv_vfwsub_w_vl : RVSDNode<"VFWSUB_W_VL", SDT_RISCVVWFPBinOpW_VL>; } // let HasPassthruOp = true, HasMaskOp = true def SDTRVVVecReduce : SDTypeProfile<1, 6, [ SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<2, 4>, SDTCisVT<5, XLenVT>, SDTCisVT<6, XLenVT> ]>; let HasOneUse = 1 in { def riscv_add_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, node:$E), (riscv_add_vl node:$A, node:$B, node:$C, node:$D, node:$E)>; def riscv_or_vl_is_add_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, node:$E), (riscv_or_vl node:$A, node:$B, node:$C, node:$D, node:$E), [{ return orDisjoint(N); }]>; def riscv_sub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, node:$E), (riscv_sub_vl node:$A, node:$B, node:$C, node:$D, node:$E)>; def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, node:$E), (riscv_mul_vl node:$A, node:$B, node:$C, node:$D, node:$E)>; def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, node:$E), (riscv_vwmul_vl node:$A, node:$B, node:$C, node:$D, node:$E)>; def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, node:$E), (riscv_vwmulu_vl node:$A, node:$B, node:$C, node:$D, node:$E)>; def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D, node:$E), (riscv_vwmulsu_vl node:$A, node:$B, node:$C, node:$D, node:$E)>; def riscv_sext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), (riscv_sext_vl node:$A, node:$B, node:$C)>; def riscv_zext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C), (riscv_zext_vl node:$A, node:$B, 
node:$C)>;
def riscv_ext_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                  (riscv_ext_vl node:$A, node:$B, node:$C)>;

def riscv_fpextend_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C),
                                       (riscv_fpextend_vl node:$A, node:$B,
                                                          node:$C)>;

def riscv_vfmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                          node:$E),
                                     (riscv_vfmadd_vl node:$A, node:$B,
                                                      node:$C, node:$D,
                                                      node:$E)>;
def riscv_vfnmadd_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                           node:$E),
                                      (riscv_vfnmadd_vl node:$A, node:$B,
                                                        node:$C, node:$D,
                                                        node:$E)>;
def riscv_vfmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                          node:$E),
                                     (riscv_vfmsub_vl node:$A, node:$B,
                                                      node:$C, node:$D,
                                                      node:$E)>;
def riscv_vfnmsub_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D,
                                           node:$E),
                                      (riscv_vfnmsub_vl node:$A, node:$B,
                                                        node:$C, node:$D,
                                                        node:$E)>;
} // HasOneUse = 1

def riscv_fpextend_vl_sameuser
    : PatFrag<(ops node:$A, node:$B, node:$C),
              (riscv_fpextend_vl node:$A, node:$B, node:$C), [{
  return !N->use_empty() && all_equal(N->users());
}]>;

// These nodes match the semantics of the corresponding RVV vector reduction
// instructions. They produce a vector result which is the reduction
// performed over the second vector operand plus the first element of the
// third vector operand. The first operand is the pass-thru operand. The
// second operand is an unconstrained vector type, and the result, first, and
// third operands' types are expected to be the corresponding full-width
// LMUL=1 type for the second operand:
//   nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8
//   nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32
// The difference in types does introduce extra vsetvli instructions, but
// similarly it reduces the number of registers consumed per reduction.
// Also has a mask and VL operand.
let HasMaskOp = true in
foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
                "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
  def rvv_vecreduce_#kind#_vl : RVSDNode<"VECREDUCE_"#kind#"_VL",
                                         SDTRVVVecReduce>;

// Give explicit Complexity to prefer simm5/uimm5.
def SplatPat       : ComplexPattern<vAny, 1, "selectVSplat", [], [], 1>;
def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], [], 3>;
def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm<5>", [], [], 3>;
def SplatPat_uimm6 : ComplexPattern<vAny, 1, "selectVSplatUimm<6>", [], [], 3>;
def SplatPat_simm5_plus1
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1", [], [], 3>;
def SplatPat_simm5_plus1_nodec
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NoDec", [], [], 3>;
def SplatPat_simm5_plus1_nonzero
    : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>;
def SplatPat_imm64_neg
    : ComplexPattern<i64, 1, "selectVSplatImm64Neg", [], [], 3>;

// Selects extends or truncates of splats where we only care about the lowest 8
// bits of each element.
def Low8BitsSplatPat : ComplexPattern<vAny, 1, "selectLow8BitsVSplat">;

// Ignore the VL operand on vfmv_v_f and vfmv_s_f.
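// That is, an FP splat (vfmv.v.f) or a scalar insert into an undef vector
// (vfmv.s.f) is treated simply as "this scalar value", regardless of the VL
// it was created with; the patterns below use this to select the .vf forms.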
def SplatFPOp : PatFrags<(ops node:$op), [(riscv_vfmv_v_f_vl undef, node:$op, srcvalue), (riscv_vfmv_s_f_vl undef, node:$op, srcvalue)]>; def sew8simm5 : ComplexPattern", []>; def sew16simm5 : ComplexPattern", []>; def sew32simm5 : ComplexPattern", []>; def sew64simm5 : ComplexPattern", []>; class VPatBinaryVL_V : Pat<(result_type (vop (op1_type op1_reg_class:$rs1), (op2_type op2_reg_class:$rs2), (result_type result_reg_class:$passthru), (mask_type VMV0:$vm), VLOpFrag)), (!cast( !if(isSEWAware, instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) result_reg_class:$passthru, op1_reg_class:$rs1, op2_reg_class:$rs2, (mask_type VMV0:$vm), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; class VPatBinaryVL_V_RM : Pat<(result_type (vop (op1_type op1_reg_class:$rs1), (op2_type op2_reg_class:$rs2), (result_type result_reg_class:$passthru), (mask_type VMV0:$vm), VLOpFrag)), (!cast( !if(isSEWAware, instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", instruction_name#"_"#suffix#"_"#vlmul.MX#"_MASK")) result_reg_class:$passthru, op1_reg_class:$rs1, op2_reg_class:$rs2, (mask_type VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, log2sew, TAIL_AGNOSTIC)>; multiclass VPatTiedBinaryNoMaskVL_V { def : Pat<(result_type (vop (result_type result_reg_class:$rs1), (op2_type op2_reg_class:$rs2), srcvalue, true_mask, VLOpFrag)), (!cast(instruction_name#"_"#suffix#"_"# vlmul.MX#"_TIED") result_reg_class:$rs1, op2_reg_class:$rs2, GPR:$vl, sew, TAIL_AGNOSTIC)>; } class VPatTiedBinaryMaskVL_V : Pat<(result_type (vop (result_type result_reg_class:$rs1), (op2_type op2_reg_class:$rs2), (result_type result_reg_class:$rs1), (mask_type VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_"#suffix#"_"# vlmul.MX#"_MASK_TIED") result_reg_class:$rs1, op2_reg_class:$rs2, (mask_type VMV0:$vm), GPR:$vl, sew, TU_MU)>; multiclass VPatTiedBinaryNoMaskVL_V_RM { defvar name = !if(isSEWAware, instruction_name#"_"#suffix#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_TIED", instruction_name#"_"#suffix#"_"#vlmul.MX#"_TIED"); def : Pat<(result_type (vop (result_type result_reg_class:$rs1), (op2_type op2_reg_class:$rs2), srcvalue, true_mask, VLOpFrag)), (!cast(name) result_reg_class:$rs1, op2_reg_class:$rs2, // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, log2sew, TAIL_AGNOSTIC)>; } class VPatBinaryVL_XI : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), (vop2_type (SplatPatKind (XLenVT xop_kind:$rs2))), (result_type result_reg_class:$passthru), (mask_type VMV0:$vm), VLOpFrag)), (!cast( !if(isSEWAware, instruction_name#_#suffix#_#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", instruction_name#_#suffix#_#vlmul.MX#"_MASK")) result_reg_class:$passthru, vop_reg_class:$rs1, xop_kind:$rs2, (mask_type VMV0:$vm), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; multiclass VPatBinaryVL_VV_VX vtilist = AllIntegerVectors, bit isSEWAware = 0> { foreach vti = vtilist in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinaryVL_V; def : VPatBinaryVL_XI; } } } multiclass VPatBinaryVL_VV_VX_VI : VPatBinaryVL_VV_VX { foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in def : VPatBinaryVL_XI(SplatPat#_#ImmType), ImmType>; } } multiclass VPatBinaryWVL_VV_VX { foreach VtiToWti = AllWidenableIntVectors in { defvar vti = VtiToWti.Vti; defvar wti = VtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : 
VPatBinaryVL_V; def : VPatBinaryVL_XI; } } } multiclass VPatBinaryWVL_VV_VX_WV_WX : VPatBinaryWVL_VV_VX { foreach VtiToWti = AllWidenableIntVectors in { defvar vti = VtiToWti.Vti; defvar wti = VtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { defm : VPatTiedBinaryNoMaskVL_V; def : VPatTiedBinaryMaskVL_V; def : VPatBinaryVL_V; def : VPatBinaryVL_XI; } } } class VPatBinaryVL_VF : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), (vop2_type (SplatFPOp scalar_reg_class:$rs2)), (result_type result_reg_class:$passthru), (mask_type VMV0:$vm), VLOpFrag)), (!cast( !if(isSEWAware, instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", instruction_name#"_"#vlmul.MX#"_MASK")) result_reg_class:$passthru, vop_reg_class:$rs1, scalar_reg_class:$rs2, (mask_type VMV0:$vm), GPR:$vl, log2sew, TAIL_AGNOSTIC)>; class VPatBinaryVL_VF_RM : Pat<(result_type (vop (vop1_type vop_reg_class:$rs1), (vop2_type (SplatFPOp scalar_reg_class:$rs2)), (result_type result_reg_class:$passthru), (mask_type VMV0:$vm), VLOpFrag)), (!cast( !if(isSEWAware, instruction_name#"_"#vlmul.MX#"_E"#!shl(1, log2sew)#"_MASK", instruction_name#"_"#vlmul.MX#"_MASK")) result_reg_class:$passthru, vop_reg_class:$rs1, scalar_reg_class:$rs2, (mask_type VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, log2sew, TAIL_AGNOSTIC)>; multiclass VPatBinaryFPVL_VV_VF { foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinaryVL_V; def : VPatBinaryVL_VF; } } } multiclass VPatBinaryFPVL_VV_VF_RM { foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : VPatBinaryVL_V_RM; def : VPatBinaryVL_VF_RM; } } } multiclass VPatBinaryFPVL_R_VF { foreach fvti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), fvti.RegClass:$rs1, (fvti.Vector fvti.RegClass:$passthru), (fvti.Mask VMV0:$vm), VLOpFrag)), (!cast( !if(isSEWAware, instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) fvti.RegClass:$passthru, fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; } } multiclass VPatBinaryFPVL_R_VF_RM { foreach fvti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in def : Pat<(fvti.Vector (vop (SplatFPOp fvti.ScalarRegClass:$rs2), fvti.RegClass:$rs1, (fvti.Vector fvti.RegClass:$passthru), (fvti.Mask VMV0:$vm), VLOpFrag)), (!cast( !if(isSEWAware, instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK", instruction_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK")) fvti.RegClass:$passthru, fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; } } multiclass VPatIntegerSetCCVL_VV { def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), vti.RegClass:$rs2, cc, VR:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") VR:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>; } // Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped. 
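// Swapping the operands of a compare requires swapping the condition code:
// e.g. (setcc a, b, SETLT) with its operands exchanged is (setcc b, a, SETGT),
// so invcc below is expected to be the swapped-operand form of cc.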
multiclass VPatIntegerSetCCVL_VV_Swappable : VPatIntegerSetCCVL_VV { def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2), vti.RegClass:$rs1, invcc, VR:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") VR:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>; } multiclass VPatIntegerSetCCVL_VX_Swappable { defvar instruction_masked = !cast(instruction_name#"_VX_"#vti.LMul.MX#"_MASK"); def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), (SplatPat (XLenVT GPR:$rs2)), cc, VR:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (instruction_masked VR:$passthru, vti.RegClass:$rs1, GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>; def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)), (vti.Vector vti.RegClass:$rs1), invcc, VR:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (instruction_masked VR:$passthru, vti.RegClass:$rs1, GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>; } multiclass VPatIntegerSetCCVL_VI_Swappable { defvar instruction_masked = !cast(instruction_name#"_VI_"#vti.LMul.MX#"_MASK"); def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1), (splatpat_kind simm5:$rs2), cc, VR:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (instruction_masked VR:$passthru, vti.RegClass:$rs1, XLenVT:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>; // FIXME: Can do some canonicalization to remove these patterns. def : Pat<(vti.Mask (riscv_setcc_vl (splatpat_kind simm5:$rs2), (vti.Vector vti.RegClass:$rs1), invcc, VR:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (instruction_masked VR:$passthru, vti.RegClass:$rs1, simm5:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MU)>; } multiclass VPatFPSetCCVL_VV_VF_FV { foreach fvti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), fvti.RegClass:$rs2, cc, VR:$passthru, (fvti.Mask VMV0:$vm), VLOpFrag)), (!cast(inst_name#"_VV_"#fvti.LMul.MX#"_MASK") VR:$passthru, fvti.RegClass:$rs1, fvti.RegClass:$rs2, (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TA_MU)>; def : Pat<(fvti.Mask (vop (fvti.Vector fvti.RegClass:$rs1), (SplatFPOp fvti.ScalarRegClass:$rs2), cc, VR:$passthru, (fvti.Mask VMV0:$vm), VLOpFrag)), (!cast(inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") VR:$passthru, fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TA_MU)>; def : Pat<(fvti.Mask (vop (SplatFPOp fvti.ScalarRegClass:$rs2), (fvti.Vector fvti.RegClass:$rs1), cc, VR:$passthru, (fvti.Mask VMV0:$vm), VLOpFrag)), (!cast(swapped_op_inst_name#"_V"#fvti.ScalarSuffix#"_"#fvti.LMul.MX#"_MASK") VR:$passthru, fvti.RegClass:$rs1, fvti.ScalarRegClass:$rs2, (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TA_MU)>; } } } multiclass VPatExtendVL_V fraction_list> { foreach vtiTofti = fraction_list in { defvar vti = vtiTofti.Vti; defvar fti = vtiTofti.Fti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(vti.Vector (vop (fti.Vector fti.RegClass:$rs2), (fti.Mask VMV0:$vm), VLOpFrag)), (!cast(inst_name#"_"#suffix#"_"#vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), fti.RegClass:$rs2, (fti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>; } } // Single width converting multiclass VPatConvertFP2IVL_V { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : 
Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), (fvti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_"#ivti.LMul.MX#"_MASK") (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), GPR:$vl, ivti.Log2SEW, TA_MA)>; } } multiclass VPatConvertFP2I_RM_VL_V { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(ivti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), (fvti.Mask VMV0:$vm), (XLenVT timm:$frm), VLOpFrag)), (!cast(instruction_name#"_"#ivti.LMul.MX#"_MASK") (ivti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), timm:$frm, GPR:$vl, ivti.Log2SEW, TA_MA)>; } } multiclass VPatConvertI2FPVL_V_RM { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), (ivti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, (ivti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, fvti.Log2SEW, TA_MA)>; } } multiclass VPatConvertI2FP_RM_VL_V { foreach fvti = AllFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(fvti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), (ivti.Mask VMV0:$vm), (XLenVT timm:$frm), VLOpFrag)), (!cast(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") (fvti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, (ivti.Mask VMV0:$vm), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; } } // Widening converting multiclass VPatWConvertFP2IVL_V { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar iwti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), (fvti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_"#fvti.LMul.MX#"_MASK") (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TA_MA)>; } } multiclass VPatWConvertFP2I_RM_VL_V { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar iwti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(iwti.Vector (vop (fvti.Vector fvti.RegClass:$rs1), (fvti.Mask VMV0:$vm), (XLenVT timm:$frm), VLOpFrag)), (!cast(instruction_name#"_"#fvti.LMul.MX#"_MASK") (iwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; } } multiclass VPatWConvertI2FPVL_V { foreach vtiToWti = AllWidenableIntToFloatVectors in { defvar ivti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(fwti.Vector (vop (ivti.Vector ivti.RegClass:$rs1), (ivti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_"#ivti.LMul.MX#"_E"#ivti.SEW#"_MASK") (fwti.Vector (IMPLICIT_DEF)), ivti.RegClass:$rs1, (ivti.Mask VMV0:$vm), GPR:$vl, ivti.Log2SEW, TA_MA)>; } } // Narrowing converting multiclass VPatNConvertFP2IVL_W { // Reuse the same list of types used in the widening nodes, but just swap the // direction of types around so we're converting from Wti -> Vti 
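// (e.g. an nxv2f64 source narrows to an nxv2i32 result), so fwti below
// provides the source type and vti the destination type.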
foreach vtiToWti = AllWidenableIntToFloatVectors in { defvar vti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), (fwti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_"#vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, (fwti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>; } } multiclass VPatNConvertFP2I_RM_VL_W { foreach vtiToWti = AllWidenableIntToFloatVectors in { defvar vti = vtiToWti.Vti; defvar fwti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(vti.Vector (vop (fwti.Vector fwti.RegClass:$rs1), (fwti.Mask VMV0:$vm), (XLenVT timm:$frm), VLOpFrag)), (!cast(instruction_name#"_"#vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1, (fwti.Mask VMV0:$vm), timm:$frm, GPR:$vl, vti.Log2SEW, TA_MA)>; } } multiclass VPatNConvertI2FPVL_W_RM { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar iwti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), (iwti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, (iwti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, fvti.Log2SEW, TA_MA)>; } } multiclass VPatNConvertI2FP_RM_VL_W { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar fvti = fvtiToFWti.Vti; defvar iwti = GetIntVTypeInfo.Vti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(fvti.Vector (vop (iwti.Vector iwti.RegClass:$rs1), (iwti.Mask VMV0:$vm), (XLenVT timm:$frm), VLOpFrag)), (!cast(instruction_name#"_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK") (fvti.Vector (IMPLICIT_DEF)), iwti.RegClass:$rs1, (iwti.Mask VMV0:$vm), timm:$frm, GPR:$vl, fvti.Log2SEW, TA_MA)>; } } multiclass VPatReductionVL { foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { defvar vti_m1 = !cast(!if(is_float, "VF", "VI") # vti.SEW # "M1"); let Predicates = GetVTypePredicates.Predicates in { def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag, (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (vti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), (vti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; } } } multiclass VPatReductionVL_RM { foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in { defvar vti_m1 = !cast(!if(is_float, "VF", "VI") # vti.SEW # "M1"); let Predicates = GetVTypePredicates.Predicates in { def: Pat<(vti_m1.Vector (vop (vti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag, (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (vti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), (vti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; } } } multiclass VPatBinaryVL_WV_WX_WI { foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let 
Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat< (vti.Vector (riscv_trunc_vector_vl (op (wti.Vector wti.RegClass:$rs2), (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))), (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_WV_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; def : Pat< (vti.Vector (riscv_trunc_vector_vl (op (wti.Vector wti.RegClass:$rs2), (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))), (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_WX_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; def : Pat< (vti.Vector (riscv_trunc_vector_vl (op (wti.Vector wti.RegClass:$rs2), (wti.Vector (SplatPat_uimm5 uimm5:$rs1))), (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_WI_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, uimm5:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; } } } multiclass VPatWidenReductionVL { foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; defvar wti_m1 = !cast(!if(is_float, "VF", "VI") # wti.SEW # "M1"); let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru), (wti.Vector (extop (vti.Vector vti.RegClass:$rs1))), VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag, (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), (wti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; } } } multiclass VPatWidenReductionVL_Ext_VL { foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; defvar wti_m1 = !cast(!if(is_float, "VF", "VI") # wti.SEW # "M1"); let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru), (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue))), VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag, (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), (wti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; } } } multiclass VPatWidenReductionVL_Ext_VL_RM { foreach vtiToWti = !if(is_float, AllWidenableFloatVectors, AllWidenableIntVectors) in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; defvar wti_m1 = !cast(!if(is_float, "VF", "VI") # wti.SEW # "M1"); let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def: Pat<(wti_m1.Vector (vop (wti_m1.Vector VR:$passthru), (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), (XLenVT srcvalue))), VR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag, (XLenVT timm:$policy))), (!cast(instruction_name#"_VS_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK") (wti_m1.Vector VR:$passthru), (vti.Vector vti.RegClass:$rs1), (wti_m1.Vector VR:$rs2), (vti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; } } } multiclass VPatBinaryFPWVL_VV_VF { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar vti = fvtiToFWti.Vti; defvar wti = 
fvtiToFWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : VPatBinaryVL_V; def : VPatBinaryVL_VF; } } } multiclass VPatBinaryFPWVL_VV_VF_RM { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar vti = fvtiToFWti.Vti; defvar wti = fvtiToFWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : VPatBinaryVL_V_RM; def : VPatBinaryVL_VF_RM; } } } multiclass VPatBinaryFPWVL_VV_VF_WV_WF : VPatBinaryFPWVL_VV_VF { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar vti = fvtiToFWti.Vti; defvar wti = fvtiToFWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { defm : VPatTiedBinaryNoMaskVL_V; def : VPatBinaryVL_V; def : VPatBinaryVL_VF; } } } multiclass VPatBinaryFPWVL_VV_VF_WV_WF_RM< SDNode vop, SDNode vop_w, string instruction_name, bit isSEWAware = 0> : VPatBinaryFPWVL_VV_VF_RM { foreach fvtiToFWti = AllWidenableFloatVectors in { defvar vti = fvtiToFWti.Vti; defvar wti = fvtiToFWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { defm : VPatTiedBinaryNoMaskVL_V_RM; def : VPatBinaryVL_V_RM; def : VPatBinaryVL_VF_RM; } } } multiclass VPatNarrowShiftSplatExt_WX { foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat< (vti.Vector (riscv_trunc_vector_vl (op (wti.Vector wti.RegClass:$rs2), (wti.Vector (extop (vti.Vector (SplatPat (XLenVT GPR:$rs1))), (vti.Mask true_mask), VLOpFrag)), srcvalue, (wti.Mask true_mask), VLOpFrag), (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_WX_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, GPR:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; } } multiclass VPatNarrowShiftExtVL_WV { foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat< (vti.Vector (riscv_trunc_vector_vl (op (wti.Vector wti.RegClass:$rs2), (wti.Vector (extop (vti.Vector vti.RegClass:$rs1), (vti.Mask true_mask), VLOpFrag)), srcvalue, (vti.Mask true_mask), VLOpFrag), (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_WV_"#vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>; } } multiclass VPatNarrowShiftVL_WV { defm : VPatNarrowShiftExtVL_WV; defm : VPatNarrowShiftExtVL_WV; } multiclass VPatMultiplyAddVL_VV_VX { foreach vti = AllIntegerVectors in { defvar suffix = vti.LMul.MX; let Predicates = GetVTypePredicates.Predicates in { // NOTE: We choose VMADD because it has the most commuting freedom. So it // works best with how TwoAddressInstructionPass tries commuting. def : Pat<(vti.Vector (op vti.RegClass:$rs2, (riscv_mul_vl_oneuse vti.RegClass:$rs1, vti.RegClass:$rd, srcvalue, (vti.Mask true_mask), VLOpFrag), srcvalue, (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_VV_"# suffix) vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; // The choice of VMADD here is arbitrary, vmadd.vx and vmacc.vx are equally // commutable. 
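// Both forms read the scalar multiplier from rs1; they differ only in
// whether vd supplies the multiplicand (vmadd.vx: vd = x*vd + vs2) or the
// addend (vmacc.vx: vd = x*vs1 + vd).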
def : Pat<(vti.Vector (op vti.RegClass:$rs2, (riscv_mul_vl_oneuse (SplatPat XLenVT:$rs1), vti.RegClass:$rd, srcvalue, (vti.Mask true_mask), VLOpFrag), srcvalue, (vti.Mask true_mask), VLOpFrag)), (!cast(instruction_name#"_VX_" # suffix) vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } } multiclass VPatWidenMultiplyAddVL_VV_VX { foreach vtiTowti = AllWidenableIntVectors in { defvar vti = vtiTowti.Vti; defvar wti = vtiTowti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(vwmacc_op (vti.Vector vti.RegClass:$rs1), (vti.Vector vti.RegClass:$rs2), (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm), VLOpFrag), (!cast(instr_name#"_VV_"#vti.LMul.MX#"_MASK") wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vwmacc_op (SplatPat XLenVT:$rs1), (vti.Vector vti.RegClass:$rs2), (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm), VLOpFrag), (!cast(instr_name#"_VX_"#vti.LMul.MX#"_MASK") wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } } multiclass VPatNarrowShiftSplat_WX_WI { foreach vtiTowti = AllWidenableIntVectors in { defvar vti = vtiTowti.Vti; defvar wti = vtiTowti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector (op wti.RegClass:$rs1, (SplatPat XLenVT:$rs2), srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), (!cast(instruction_name#"_WX_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>; def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector (op wti.RegClass:$rs1, (SplatPat_uimm5 uimm5:$rs2), srcvalue, true_mask, VLOpFrag)), true_mask, VLOpFrag)), (!cast(instruction_name#"_WI_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, uimm5:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>; } } } multiclass VPatFPMulAddVL_VV_VF { foreach vti = AllFloatVectors in { defvar suffix = vti.LMul.MX; let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_VV_"# suffix #"_MASK") vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>; def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rd, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>; } } } multiclass VPatFPMulAddVL_VV_VF_RM { foreach vti = AllFloatVectors in { defvar suffix = vti.LMul.MX # "_E" # vti.SEW; let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (vop vti.RegClass:$rs1, vti.RegClass:$rd, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_VV_"# suffix #"_MASK") vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, vti.Log2SEW, TA_MA)>; def : Pat<(vti.Vector (vop (SplatFPOp vti.ScalarRegClass:$rs1), vti.RegClass:$rd, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_V" # vti.ScalarSuffix # "_" # suffix # "_MASK") vti.RegClass:$rd, 
vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, vti.Log2SEW, TA_MA)>; } } } multiclass VPatWidenFPMulAccVL_VV_VF_RM vtiToWtis = AllWidenableFloatVectors> { foreach vtiToWti = vtiToWtis in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; defvar suffix = vti.LMul.MX # "_E" # vti.SEW; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates, !if(!eq(vti.Scalar, bf16), [HasStdExtZvfbfwma], [])) in { def : Pat<(vop (vti.Vector vti.RegClass:$rs1), (vti.Vector vti.RegClass:$rs2), (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm), VLOpFrag), (!cast(instruction_name#"_VV_"#suffix#"_MASK") wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, vti.Log2SEW, TA_MA)>; def : Pat<(vop (vti.Vector (SplatFPOp vti.ScalarRegClass:$rs1)), (vti.Vector vti.RegClass:$rs2), (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm), VLOpFrag), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#suffix#"_MASK") wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, vti.Log2SEW, TA_MA)>; } } } multiclass VPatSlideVL_VX_VI { foreach vti = AllVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd), (vti.Vector vti.RegClass:$rs1), uimm5:$rs2, (vti.Mask VMV0:$vm), VLOpFrag, (XLenVT timm:$policy))), (!cast(instruction_name#"_VI_"#vti.LMul.MX#"_MASK") vti.RegClass:$rd, vti.RegClass:$rs1, uimm5:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rd), (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag, (XLenVT timm:$policy))), (!cast(instruction_name#"_VX_"#vti.LMul.MX#"_MASK") vti.RegClass:$rd, vti.RegClass:$rs1, GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>; } } } multiclass VPatSlide1VL_VX { foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3), (vti.Vector vti.RegClass:$rs1), GPR:$rs2, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_VX_"#vti.LMul.MX#"_MASK") vti.RegClass:$rs3, vti.RegClass:$rs1, GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TU_MU)>; } } } multiclass VPatSlide1VL_VF { foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (vop (vti.Vector vti.RegClass:$rs3), (vti.Vector vti.RegClass:$rs1), vti.Scalar:$rs2, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(instruction_name#"_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_MASK") vti.RegClass:$rs3, vti.RegClass:$rs1, vti.Scalar:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TU_MU)>; } } } multiclass VPatAVGADDVL_VV_VX_RM { foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vop (vti.Vector vti.RegClass:$rs1), (vti.Vector vti.RegClass:$rs2), vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask VMV0:$vm), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vop (vti.Vector vti.RegClass:$rs1), (vti.Vector (SplatPat (XLenVT GPR:$rs2))), 
vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs1, GPR:$rs2, (vti.Mask VMV0:$vm), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } } //===----------------------------------------------------------------------===// // Patterns. //===----------------------------------------------------------------------===// // 11. Vector Integer Arithmetic Instructions // 11.1. Vector Single-Width Integer Add and Subtract defm : VPatBinaryVL_VV_VX_VI; defm : VPatBinaryVL_VV_VX; foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { // Handle VRSUB specially since it's the only integer binary op with // reversed pattern operands def : Pat<(riscv_sub_vl (vti.Vector (SplatPat (XLenVT GPR:$rs2))), (vti.Vector vti.RegClass:$rs1), vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVRSUB_VX_"# vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs1, GPR:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(riscv_sub_vl (vti.Vector (SplatPat_simm5 simm5:$rs2)), (vti.Vector vti.RegClass:$rs1), vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVRSUB_VI_"# vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs1, simm5:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; // Match VSUB with a small immediate to vadd.vi by negating the immediate. def : Pat<(riscv_sub_vl (vti.Vector vti.RegClass:$rs1), (vti.Vector (SplatPat_simm5_plus1_nodec simm5_plus1:$rs2)), vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVADD_VI_"#vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs1, (NegImm simm5_plus1:$rs2), (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } // (add v, C) -> (sub v, -C) if -C cheaper to materialize foreach vti = I64IntegerVectors in { let Predicates = [HasVInstructionsI64] in { def : Pat<(riscv_add_vl (vti.Vector vti.RegClass:$rs1), (vti.Vector (SplatPat_imm64_neg (i64 GPR:$rs2))), vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVSUB_VX_"#vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs1, negImm:$rs2, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } // 11.2. Vector Widening Integer Add/Subtract defm : VPatBinaryWVL_VV_VX_WV_WX; defm : VPatBinaryWVL_VV_VX_WV_WX; defm : VPatBinaryWVL_VV_VX_WV_WX; defm : VPatBinaryWVL_VV_VX_WV_WX; // shl_vl (ext_vl v, splat 1) is a special case of widening add. 
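// That is, (sext(v) << 1) == sext(v) + sext(v) == vwadd.vv v, v, and
// likewise (zext(v) << 1) == vwaddu.vv v, v, which is what the patterns
// below match.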
foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(riscv_shl_vl (wti.Vector (riscv_sext_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask VMV0:$vm), VLOpFrag)), (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, VLOpFrag)), wti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVWADD_VV_"#vti.LMul.MX#"_MASK") wti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(riscv_shl_vl (wti.Vector (riscv_zext_vl_oneuse (vti.Vector vti.RegClass:$rs1), (vti.Mask VMV0:$vm), VLOpFrag)), (wti.Vector (riscv_vmv_v_x_vl (wti.Vector undef), 1, VLOpFrag)), wti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVWADDU_VV_"#vti.LMul.MX#"_MASK") wti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } // DAGCombiner::hoistLogicOpWithSameOpcodeHands may hoist disjoint ors // to (ext (or disjoint (a, b))) multiclass VPatWidenOrDisjointVL_VV_VX { foreach vtiToWti = AllWidenableIntVectors in { defvar vti = vtiToWti.Vti; defvar wti = vtiToWti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { def : Pat<(wti.Vector (extop (vti.Vector (riscv_or_vl_is_add_oneuse vti.RegClass:$rs2, vti.RegClass:$rs1, undef, srcvalue, srcvalue)), VMV0:$vm, VLOpFrag)), (!cast(instruction_name#"_VV_"#vti.LMul.MX#"_MASK") (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, vti.RegClass:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW, TA_MA)>; def : Pat<(wti.Vector (extop (vti.Vector (riscv_or_vl_is_add_oneuse vti.RegClass:$rs2, (SplatPat (XLenVT GPR:$rs1)), undef, srcvalue, srcvalue)), VMV0:$vm, VLOpFrag)), (!cast(instruction_name#"_VX_"#vti.LMul.MX#"_MASK") (wti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, GPR:$rs1, VMV0:$vm, GPR:$vl, vti.Log2SEW, TA_MA)>; } } } defm : VPatWidenOrDisjointVL_VV_VX; defm : VPatWidenOrDisjointVL_VV_VX; // 11.3. Vector Integer Extension defm : VPatExtendVL_V; defm : VPatExtendVL_V; defm : VPatExtendVL_V; defm : VPatExtendVL_V; defm : VPatExtendVL_V; defm : VPatExtendVL_V; // 11.5. Vector Bitwise Logical Instructions defm : VPatBinaryVL_VV_VX_VI; defm : VPatBinaryVL_VV_VX_VI; defm : VPatBinaryVL_VV_VX_VI; // 11.6. Vector Single-Width Bit Shift Instructions defm : VPatBinaryVL_VV_VX_VI; defm : VPatBinaryVL_VV_VX_VI; defm : VPatBinaryVL_VV_VX_VI; foreach vti = AllIntegerVectors in { // Emit shift by 1 as an add since it might be faster. let Predicates = GetVTypePredicates.Predicates in def : Pat<(riscv_shl_vl (vti.Vector vti.RegClass:$rs1), (riscv_vmv_v_x_vl (vti.Vector undef), 1, (XLenVT srcvalue)), srcvalue, (vti.Mask true_mask), VLOpFrag), (!cast("PseudoVADD_VV_"# vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs1, GPR:$vl, vti.Log2SEW, TA_MA)>; } // 11.7. 
Vector Narrowing Integer Right Shift Instructions defm : VPatBinaryVL_WV_WX_WI; defm : VPatBinaryVL_WV_WX_WI; defm : VPatNarrowShiftSplat_WX_WI; defm : VPatNarrowShiftSplat_WX_WI; defm : VPatNarrowShiftSplatExt_WX; defm : VPatNarrowShiftSplatExt_WX; defm : VPatNarrowShiftSplatExt_WX; defm : VPatNarrowShiftSplatExt_WX; defm : VPatNarrowShiftVL_WV; defm : VPatNarrowShiftVL_WV; foreach vtiTowti = AllWidenableIntVectors in { defvar vti = vtiTowti.Vti; defvar wti = vtiTowti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(vti.Vector (riscv_trunc_vector_vl (wti.Vector wti.RegClass:$rs1), (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVNSRL_WI_"#vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>; } // 11.8. Vector Integer Comparison Instructions foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { defm : VPatIntegerSetCCVL_VV; defm : VPatIntegerSetCCVL_VV; defm : VPatIntegerSetCCVL_VV_Swappable; defm : VPatIntegerSetCCVL_VV_Swappable; defm : VPatIntegerSetCCVL_VV_Swappable; defm : VPatIntegerSetCCVL_VV_Swappable; defm : VPatIntegerSetCCVL_VX_Swappable; defm : VPatIntegerSetCCVL_VX_Swappable; defm : VPatIntegerSetCCVL_VX_Swappable; defm : VPatIntegerSetCCVL_VX_Swappable; defm : VPatIntegerSetCCVL_VX_Swappable; defm : VPatIntegerSetCCVL_VX_Swappable; defm : VPatIntegerSetCCVL_VX_Swappable; defm : VPatIntegerSetCCVL_VX_Swappable; // There is no VMSGE(U)_VX instruction defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; defm : VPatIntegerSetCCVL_VI_Swappable; } } // foreach vti = AllIntegerVectors // 11.9. Vector Integer Min/Max Instructions defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; // 11.10. Vector Single-Width Integer Multiply Instructions defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; // vsmul.vv and vsmul.vx are not included in EEW=64 in Zve64*. let Predicates = [HasVInstructionsFullMultiply] in { defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; } // 11.11. Vector Integer Divide Instructions defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; // 11.12. Vector Widening Integer Multiply Instructions defm : VPatBinaryWVL_VV_VX; defm : VPatBinaryWVL_VV_VX; defm : VPatBinaryWVL_VV_VX; // 11.13 Vector Single-Width Integer Multiply-Add Instructions defm : VPatMultiplyAddVL_VV_VX; defm : VPatMultiplyAddVL_VV_VX; // 11.14. 
Vector Widening Integer Multiply-Add Instructions defm : VPatWidenMultiplyAddVL_VV_VX; defm : VPatWidenMultiplyAddVL_VV_VX; defm : VPatWidenMultiplyAddVL_VV_VX; foreach vtiTowti = AllWidenableIntVectors in { defvar vti = vtiTowti.Vti; defvar wti = vtiTowti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(riscv_vwmaccsu_vl (vti.Vector vti.RegClass:$rs1), (SplatPat XLenVT:$rs2), (wti.Vector wti.RegClass:$rd), (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVWMACCUS_VX_"#vti.LMul.MX#"_MASK") wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } // 11.15. Vector Integer Merge Instructions foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask VMV0:$vm), vti.RegClass:$rs1, vti.RegClass:$rs2, vti.RegClass:$passthru, VLOpFrag)), (!cast("PseudoVMERGE_VVM_"#vti.LMul.MX) vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask VMV0:$vm), (SplatPat XLenVT:$rs1), vti.RegClass:$rs2, vti.RegClass:$passthru, VLOpFrag)), (!cast("PseudoVMERGE_VXM_"#vti.LMul.MX) vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (riscv_vmerge_vl (vti.Mask VMV0:$vm), (SplatPat_simm5 simm5:$rs1), vti.RegClass:$rs2, vti.RegClass:$passthru, VLOpFrag)), (!cast("PseudoVMERGE_VIM_"#vti.LMul.MX) vti.RegClass:$passthru, vti.RegClass:$rs2, simm5:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW)>; } } // 11.16. Vector Integer Move Instructions foreach vti = AllVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (riscv_vmv_v_v_vl vti.RegClass:$passthru, vti.RegClass:$rs2, VLOpFrag)), (!cast("PseudoVMV_V_V_"#vti.LMul.MX) vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; } foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, GPR:$rs2, VLOpFrag)), (!cast("PseudoVMV_V_X_"#vti.LMul.MX) vti.RegClass:$passthru, GPR:$rs2, GPR:$vl, vti.Log2SEW, TU_MU)>; defvar ImmPat = !cast("sew"#vti.SEW#"simm5"); def : Pat<(vti.Vector (riscv_vmv_v_x_vl vti.RegClass:$passthru, (ImmPat simm5:$imm5), VLOpFrag)), (!cast("PseudoVMV_V_I_"#vti.LMul.MX) vti.RegClass:$passthru, simm5:$imm5, GPR:$vl, vti.Log2SEW, TU_MU)>; } } } // 12. Vector Fixed-Point Arithmetic Instructions // 12.1. Vector Single-Width Saturating Add and Subtract defm : VPatBinaryVL_VV_VX_VI; defm : VPatBinaryVL_VV_VX_VI; defm : VPatBinaryVL_VV_VX; defm : VPatBinaryVL_VV_VX; // 12.2. Vector Single-Width Averaging Add and Subtract defm : VPatAVGADDVL_VV_VX_RM; defm : VPatAVGADDVL_VV_VX_RM; defm : VPatAVGADDVL_VV_VX_RM; defm : VPatAVGADDVL_VV_VX_RM; // 12.5. Vector Narrowing Fixed-Point Clip Instructions foreach vtiTowti = AllWidenableIntVectors in { defvar vti = vtiTowti.Vti; defvar wti = vtiTowti.Wti; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in { // Rounding mode here is arbitrary since we aren't shifting out any bits. 
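// With a shift amount of 0 (vnclip[u].wi vd, vs2, 0) no bits are discarded,
// so RNU/RNE/RDN/ROD all round identically; the patterns below simply pass
// 0 (RNU).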
def : Pat<(vti.Vector (riscv_trunc_vector_vl_ssat (wti.Vector wti.RegClass:$rs1), (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVNCLIP_WI_"#vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0, (vti.Mask VMV0:$vm), /*RNU*/0, GPR:$vl, vti.Log2SEW, TA_MA)>; def : Pat<(vti.Vector (riscv_trunc_vector_vl_usat (wti.Vector wti.RegClass:$rs1), (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVNCLIPU_WI_"#vti.LMul.MX#"_MASK") (vti.Vector (IMPLICIT_DEF)), wti.RegClass:$rs1, 0, (vti.Mask VMV0:$vm), /*RNU*/0, GPR:$vl, vti.Log2SEW, TA_MA)>; } } // 13. Vector Floating-Point Instructions // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions defm : VPatBinaryFPVL_VV_VF_RM; defm : VPatBinaryFPVL_VV_VF_RM; defm : VPatBinaryFPVL_R_VF_RM; // 13.3. Vector Widening Floating-Point Add/Subtract Instructions defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM; defm : VPatBinaryFPWVL_VV_VF_WV_WF_RM; // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions defm : VPatBinaryFPVL_VV_VF_RM; defm : VPatBinaryFPVL_VV_VF_RM; defm : VPatBinaryFPVL_R_VF_RM; // 13.5. Vector Widening Floating-Point Multiply Instructions defm : VPatBinaryFPWVL_VV_VF_RM; // 13.6 Vector Single-Width Floating-Point Fused Multiply-Add Instructions. defm : VPatFPMulAddVL_VV_VF_RM; defm : VPatFPMulAddVL_VV_VF_RM; defm : VPatFPMulAddVL_VV_VF_RM; defm : VPatFPMulAddVL_VV_VF_RM; // 13.7. Vector Widening Floating-Point Fused Multiply-Add Instructions defm : VPatWidenFPMulAccVL_VV_VF_RM; defm : VPatWidenFPMulAccVL_VV_VF_RM; defm : VPatWidenFPMulAccVL_VV_VF_RM; defm : VPatWidenFPMulAccVL_VV_VF_RM; // 13.11. Vector Floating-Point MIN/MAX Instructions defm : VPatBinaryFPVL_VV_VF; defm : VPatBinaryFPVL_VV_VF; // 13.13. Vector Floating-Point Compare Instructions defm : VPatFPSetCCVL_VV_VF_FV; defm : VPatFPSetCCVL_VV_VF_FV; defm : VPatFPSetCCVL_VV_VF_FV; defm : VPatFPSetCCVL_VV_VF_FV; defm : VPatFPSetCCVL_VV_VF_FV; defm : VPatFPSetCCVL_VV_VF_FV; defm : VPatFPSetCCVL_VV_VF_FV; defm : VPatFPSetCCVL_VV_VF_FV; foreach vti = AllFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { // 13.8. Vector Floating-Point Square-Root Instruction def : Pat<(any_riscv_fsqrt_vl (vti.Vector vti.RegClass:$rs2), (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVFSQRT_V_"# vti.LMul.MX # "_E" # vti.SEW # "_MASK") (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2, (vti.Mask VMV0:$vm), // Value to indicate no rounding mode change in // RISCVInsertReadWriteCSR FRM_DYN, GPR:$vl, vti.Log2SEW, TA_MA)>; // 13.12. Vector Floating-Point Sign-Injection Instructions def : Pat<(riscv_fabs_vl (vti.Vector vti.RegClass:$rs), (vti.Mask VMV0:$vm), VLOpFrag), (!cast("PseudoVFSGNJX_VV_"# vti.LMul.MX #"_E"#vti.SEW#"_MASK") (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs, vti.RegClass:$rs, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>; // Handle fneg with VFSGNJN using the same input for both operands. 
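// vfsgnjn.vv vd, vs2, vs1 takes the magnitude of vs2 and the inverted sign
// of vs1; with vs1 == vs2 it just flips each element's sign bit, which is
// exactly fneg.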
def : Pat<(riscv_fneg_vl (vti.Vector vti.RegClass:$rs),
                         (vti.Mask VMV0:$vm),
                         VLOpFrag),
          (!cast<Instruction>("PseudoVFSGNJN_VV_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
               (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs,
               vti.RegClass:$rs, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW,
               TA_MA)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                              (vti.Vector vti.RegClass:$rs2),
                              vti.RegClass:$passthru,
                              (vti.Mask VMV0:$vm),
                              VLOpFrag),
          (!cast<Instruction>("PseudoVFSGNJ_VV_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
               vti.RegClass:$passthru, vti.RegClass:$rs1, vti.RegClass:$rs2,
               (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                              (riscv_fneg_vl vti.RegClass:$rs2,
                                             (vti.Mask true_mask),
                                             VLOpFrag),
                              srcvalue,
                              (vti.Mask true_mask),
                              VLOpFrag),
          (!cast<Instruction>("PseudoVFSGNJN_VV_"#vti.LMul.MX#"_E"#vti.SEW)
               (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
               vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW, TA_MA)>;
def : Pat<(riscv_fcopysign_vl (vti.Vector vti.RegClass:$rs1),
                              (SplatFPOp vti.ScalarRegClass:$rs2),
                              vti.RegClass:$passthru,
                              (vti.Mask VMV0:$vm),
                              VLOpFrag),
          (!cast<Instruction>("PseudoVFSGNJ_V"#vti.ScalarSuffix#"_"#vti.LMul.MX#"_E"#vti.SEW#"_MASK")
               vti.RegClass:$passthru, vti.RegClass:$rs1,
               vti.ScalarRegClass:$rs2, (vti.Mask VMV0:$vm), GPR:$vl,
               vti.Log2SEW, TAIL_AGNOSTIC)>;

// Rounding without exception to implement nearbyint.
def : Pat<(any_riscv_vfround_noexcept_vl (vti.Vector vti.RegClass:$rs1),
                                         (vti.Mask VMV0:$vm), VLOpFrag),
          (!cast<Instruction>("PseudoVFROUND_NOEXCEPT_V_"#vti.LMul.MX#"_MASK")
               (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1,
               (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;

// 13.14. Vector Floating-Point Classify Instruction
def : Pat<(riscv_fclass_vl (vti.Vector vti.RegClass:$rs2),
                           (vti.Mask VMV0:$vm), VLOpFrag),
          (!cast<Instruction>("PseudoVFCLASS_V_"#vti.LMul.MX#"_MASK")
               (vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs2,
               (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TA_MA)>;
}
}

// Floating-point vselects:
// 11.15. Vector Integer Merge Instructions
// 13.15. Vector Floating-Point Merge Instruction
foreach fvti = AllFloatAndBFloatVectors in {
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
                                            fvti.RegClass:$rs1,
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$passthru,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VVM_"#fvti.LMul.MX)
                   fvti.RegClass:$passthru, fvti.RegClass:$rs2,
                   fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), GPR:$vl,
                   fvti.Log2SEW)>;
  }
}

foreach fvti = AllFloatVectors in {
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
                                            (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$passthru,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VXM_"#fvti.LMul.MX)
                   fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$imm,
                   (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
                                            (SplatFPOp (fvti.Scalar fpimm0)),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$passthru,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMERGE_VIM_"#fvti.LMul.MX)
                   fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0,
                   (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
  }

  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm),
                                            (SplatFPOp fvti.ScalarRegClass:$rs1),
                                            fvti.RegClass:$rs2,
                                            fvti.RegClass:$passthru,
                                            VLOpFrag)),
              (!cast<Instruction>("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX)
                   fvti.RegClass:$passthru, fvti.RegClass:$rs2,
                   (fvti.Scalar fvti.ScalarRegClass:$rs1),
                   (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>;
  }
}

foreach fvti = AllFloatVectors in {
  defvar ivti = GetIntVTypeInfo<fvti>.Vti;
  let Predicates = GetVTypePredicates<ivti>.Predicates in {
    // 13.16. Vector Floating-Point Move Instruction
    // If we're splatting fpimm0, use vmv.v.x vd, x0.
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_I_"#fvti.LMul.MX)
               $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>;
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru,
                            (fvti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))),
                            VLOpFrag)),
              (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
               $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>;
  }
}

foreach fvti = AllFloatVectors in {
  let Predicates = GetVTypePredicates<fvti>.Predicates in {
    def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl
                            fvti.Vector:$passthru,
                            (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)),
              (!cast<Instruction>("PseudoVFMV_V_"#fvti.ScalarSuffix#"_"#fvti.LMul.MX)
               $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2),
               GPR:$vl, fvti.Log2SEW, TU_MU)>;
  }
}

// 13.17. Vector Single-Width Floating-Point/Integer Type-Convert Instructions
defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFCVT_X_F_V">;
defm : VPatConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFCVT_XU_F_V">;
defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFCVT_RTZ_X_F_V">;
defm : VPatConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFCVT_RTZ_XU_F_V">;
defm : VPatConvertI2FPVL_V_RM<any_riscv_sint_to_fp_vl, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FPVL_V_RM<any_riscv_uint_to_fp_vl, "PseudoVFCVT_F_XU_V">;
defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_x_vl, "PseudoVFCVT_F_X_V">;
defm : VPatConvertI2FP_RM_VL_V<riscv_vfcvt_rm_f_xu_vl, "PseudoVFCVT_F_XU_V">;

// 13.18. Widening Floating-Point/Integer Type-Convert Instructions
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_x_f_vl, "PseudoVFWCVT_X_F_V">;
defm : VPatWConvertFP2I_RM_VL_V<riscv_vfcvt_rm_xu_f_vl, "PseudoVFWCVT_XU_F_V">;
defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFWCVT_RTZ_X_F_V">;
defm : VPatWConvertFP2IVL_V<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFWCVT_RTZ_XU_F_V">;
defm : VPatWConvertI2FPVL_V<any_riscv_sint_to_fp_vl, "PseudoVFWCVT_F_X_V">;
defm : VPatWConvertI2FPVL_V<any_riscv_uint_to_fp_vl, "PseudoVFWCVT_F_XU_V">;

foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  // Define vfwcvt.f.f.v for f16 when Zvfhmin is enabled.
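  // (Zvfhmin provides only the f16<->f32 conversions vfwcvt.f.f.v and
  // vfncvt.f.f.w, which is why the minimal predicates suffice here.)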
  let Predicates = !listconcat(GetVTypeMinimalPredicates<fvti>.Predicates,
                               GetVTypeMinimalPredicates<fwti>.Predicates) in
    def : Pat<(fwti.Vector (any_riscv_fpextend_vl
                               (fvti.Vector fvti.RegClass:$rs1),
                               (fvti.Mask VMV0:$vm),
                               VLOpFrag)),
              (!cast<Instruction>("PseudoVFWCVT_F_F_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                  (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
                  (fvti.Mask VMV0:$vm),
                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
}

// 13.19. Narrowing Floating-Point/Integer Type-Convert Instructions
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_x_f_vl, "PseudoVFNCVT_X_F_W">;
defm : VPatNConvertFP2I_RM_VL_W<riscv_vfcvt_rm_xu_f_vl, "PseudoVFNCVT_XU_F_W">;
defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_x_f_vl, "PseudoVFNCVT_RTZ_X_F_W">;
defm : VPatNConvertFP2IVL_W<any_riscv_vfcvt_rtz_xu_f_vl, "PseudoVFNCVT_RTZ_XU_F_W">;
defm : VPatNConvertI2FPVL_W_RM<any_riscv_sint_to_fp_vl, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FPVL_W_RM<any_riscv_uint_to_fp_vl, "PseudoVFNCVT_F_XU_W">;
defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_x_vl, "PseudoVFNCVT_F_X_W">;
defm : VPatNConvertI2FP_RM_VL_W<riscv_vfcvt_rm_f_xu_vl, "PseudoVFNCVT_F_XU_W">;

foreach fvtiToFWti = AllWidenableFloatVectors in {
  defvar fvti = fvtiToFWti.Vti;
  defvar fwti = fvtiToFWti.Wti;
  // Define vfncvt.f.f.w for f16 when Zvfhmin is enabled.
  let Predicates = !listconcat(GetVTypeMinimalPredicates<fvti>.Predicates,
                               GetVTypeMinimalPredicates<fwti>.Predicates) in {
    def : Pat<(fvti.Vector (any_riscv_fpround_vl
                               (fwti.Vector fwti.RegClass:$rs1),
                               (fwti.Mask VMV0:$vm), VLOpFrag)),
              (!cast<Instruction>("PseudoVFNCVT_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                  (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                  (fwti.Mask VMV0:$vm),
                  // Value to indicate no rounding mode change in
                  // RISCVInsertReadWriteCSR
                  FRM_DYN,
                  GPR:$vl, fvti.Log2SEW, TA_MA)>;

    let Predicates = !listconcat(GetVTypePredicates<fvti>.Predicates,
                                 GetVTypePredicates<fwti>.Predicates) in
      def : Pat<(fvti.Vector (any_riscv_fncvt_rod_vl
                                 (fwti.Vector fwti.RegClass:$rs1),
                                 (fwti.Mask VMV0:$vm), VLOpFrag)),
                (!cast<Instruction>("PseudoVFNCVT_ROD_F_F_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
                    (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
                    (fwti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TA_MA)>;
  }
}

// 14. Vector Reduction Operations
// 14.1. Vector Single-Width Integer Reduction Instructions
defm : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", /*is_float*/0>;
defm : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", /*is_float*/0>;

// 14.2. Vector Widening Integer Reduction Instructions
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, anyext_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, zext_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_zext_vl_oneuse, "PseudoVWREDSUMU", /*is_float*/0>;
defm : VPatWidenReductionVL<rvv_vecreduce_ADD_vl, sext_oneuse, "PseudoVWREDSUM", /*is_float*/0>;
defm : VPatWidenReductionVL_Ext_VL<rvv_vecreduce_ADD_vl, riscv_sext_vl_oneuse, "PseudoVWREDSUM", /*is_float*/0>;

// 14.3. Vector Single-Width Floating-Point Reduction Instructions
defm : VPatReductionVL_RM<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>;
defm : VPatReductionVL_RM<rvv_vecreduce_FADD_vl, "PseudoVFREDUSUM", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FMIN_vl, "PseudoVFREDMIN", /*is_float*/1>;
defm : VPatReductionVL<rvv_vecreduce_FMAX_vl, "PseudoVFREDMAX", /*is_float*/1>;

// 14.4. Vector Widening Floating-Point Reduction Instructions
defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_SEQ_FADD_vl, riscv_fpextend_vl_oneuse,
                                      "PseudoVFWREDOSUM", /*is_float*/1>;
defm : VPatWidenReductionVL_Ext_VL_RM<rvv_vecreduce_FADD_vl, riscv_fpextend_vl_oneuse,
                                      "PseudoVFWREDUSUM", /*is_float*/1>;

// 15.
Vector Mask Instructions foreach mti = AllMasks in { let Predicates = [HasVInstructions] in { // 15.1 Vector Mask-Register Logical Instructions def : Pat<(mti.Mask (riscv_vmset_vl VLOpFrag)), (!cast("PseudoVMSET_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmclr_vl VLOpFrag)), (!cast("PseudoVMCLR_M_" # mti.BX) GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag)), (!cast("PseudoVMAND_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag)), (!cast("PseudoVMOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag)), (!cast("PseudoVMXOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1, (riscv_vmnot_vl VR:$rs2, VLOpFrag), VLOpFrag)), (!cast("PseudoVMANDN_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1, (riscv_vmnot_vl VR:$rs2, VLOpFrag), VLOpFrag)), (!cast("PseudoVMORN_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; // XOR is associative so we need 2 patterns for VMXNOR. def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1, VLOpFrag), VR:$rs2, VLOpFrag)), (!cast("PseudoVMXNOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmand_vl VR:$rs1, VR:$rs2, VLOpFrag), VLOpFrag)), (!cast("PseudoVMNAND_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmor_vl VR:$rs1, VR:$rs2, VLOpFrag), VLOpFrag)), (!cast("PseudoVMNOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; def : Pat<(mti.Mask (riscv_vmnot_vl (riscv_vmxor_vl VR:$rs1, VR:$rs2, VLOpFrag), VLOpFrag)), (!cast("PseudoVMXNOR_MM_" # mti.BX) VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>; // Match the not idiom to the vmnot.m pseudo. def : Pat<(mti.Mask (riscv_vmnot_vl VR:$rs, VLOpFrag)), (!cast("PseudoVMNAND_MM_" # mti.BX) VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>; // 15.2 Vector count population in mask vcpop.m def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVCPOP_M_" # mti.BX # "_MASK") VR:$rs2, (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW)>; // 15.3 vfirst find-first-set mask bit def : Pat<(XLenVT (riscv_vfirst_vl (mti.Mask VR:$rs2), (mti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVFIRST_M_" # mti.BX # "_MASK") VR:$rs2, (mti.Mask VMV0:$vm), GPR:$vl, mti.Log2SEW)>; } } // 16. Vector Permutation Instructions // 16.1. Integer Scalar Move Instructions foreach vti = NoGroupIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$passthru), vti.ScalarRegClass:$rs1, VLOpFrag)), (PseudoVMV_S_X $passthru, vti.ScalarRegClass:$rs1, GPR:$vl, vti.Log2SEW)>; } } // 16.4. 
Vector Register Gather Instruction foreach vti = AllIntegerVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2, vti.RegClass:$rs1, vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm, vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$imm, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } // emul = lmul * 16 / sew defvar vlmul = vti.LMul; defvar octuple_lmul = vlmul.octuple; defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW); if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { defvar emul_str = octuple_to_str.ret; defvar ivti = !cast("VI16" # emul_str); defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str; let Predicates = GetVTypePredicates.Predicates in def : Pat<(vti.Vector (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2, (ivti.Vector ivti.RegClass:$rs1), vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(inst#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs2, ivti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } // 16.2. 
Floating-Point Scalar Move Instructions foreach vti = NoGroupFloatVectors in { let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), (vti.Scalar (fpimm0)), VLOpFrag)), (PseudoVMV_S_X $passthru, (XLenVT X0), GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), (vti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))), VLOpFrag)), (PseudoVMV_S_X $passthru, GPR:$imm, GPR:$vl, vti.Log2SEW)>; def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), vti.ScalarRegClass:$rs1, VLOpFrag)), (!cast("PseudoVFMV_S_"#vti.ScalarSuffix) vti.RegClass:$passthru, (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>; } } foreach vti = AllFloatAndBFloatVectors in { defvar ivti = GetIntVTypeInfo.Vti; let Predicates = GetVTypePredicates.Predicates in { def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2, (ivti.Vector vti.RegClass:$rs1), vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVRGATHER_VV_"# vti.LMul.MX#"_E"# vti.SEW#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs2, vti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, GPR:$rs1, vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVRGATHER_VX_"# vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs2, GPR:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; def : Pat<(vti.Vector (riscv_vrgather_vx_vl vti.RegClass:$rs2, uimm5:$imm, vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast("PseudoVRGATHER_VI_"# vti.LMul.MX#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs2, uimm5:$imm, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } defvar vlmul = vti.LMul; defvar octuple_lmul = vlmul.octuple; defvar octuple_emul = !srl(!mul(octuple_lmul, 16), vti.Log2SEW); if !and(!ge(octuple_emul, 1), !le(octuple_emul, 64)) then { defvar emul_str = octuple_to_str.ret; defvar ivti = !cast("VI16" # emul_str); defvar inst = "PseudoVRGATHEREI16_VV_" # vti.LMul.MX # "_E" # vti.SEW # "_" # emul_str; let Predicates = !listconcat(GetVTypePredicates.Predicates, GetVTypePredicates.Predicates) in def : Pat<(vti.Vector (riscv_vrgatherei16_vv_vl vti.RegClass:$rs2, (ivti.Vector ivti.RegClass:$rs1), vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag)), (!cast(inst#"_MASK") vti.RegClass:$passthru, vti.RegClass:$rs2, ivti.RegClass:$rs1, (vti.Mask VMV0:$vm), GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>; } } //===----------------------------------------------------------------------===// // Miscellaneous RISCVISD SDNodes //===----------------------------------------------------------------------===// // Matches the semantics of the vid.v instruction, with a mask and VL // operand. 
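// vid.v writes each element's own index to the destination, i.e.
// vd[i] = i for every active element.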
let HasMaskOp = true in
def riscv_vid_vl : RVSDNode<"VID_VL", SDTypeProfile<1, 2,
                                                    [SDTCisVec<0>,
                                                     SDTCVecEltisVT<1, i1>,
                                                     SDTCisSameNumEltsAs<0, 1>,
                                                     SDTCisVT<2, XLenVT>]>>;

def SDTRVVSlide : SDTypeProfile<1, 6, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisVT<3, XLenVT>,
  SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>, SDTCisVT<5, XLenVT>,
  SDTCisVT<6, XLenVT>
]>;
def SDTRVVSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisInt<0>,
  SDTCisVT<3, XLenVT>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;
def SDTRVVFSlide1 : SDTypeProfile<1, 5, [
  SDTCisVec<0>, SDTCisSameAs<1, 0>, SDTCisSameAs<2, 0>, SDTCisFP<0>,
  SDTCisEltOfVec<3, 0>, SDTCVecEltisVT<4, i1>, SDTCisSameNumEltsAs<0, 4>,
  SDTCisVT<5, XLenVT>
]>;

let HasMaskOp = true in {
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, the third is the
  // XLenVT index (either constant or non-constant), the fourth is the mask,
  // the fifth is the VL, and the sixth is the policy.
  def riscv_slideup_vl : RVSDNode<"VSLIDEUP_VL", SDTRVVSlide, []>;
  def riscv_slidedown_vl : RVSDNode<"VSLIDEDOWN_VL", SDTRVVSlide, []>;

  // Matches the semantics of vslide1up/vslide1down. The first operand is the
  // passthru operand, the second is the source vector, and the third is the
  // XLenVT scalar value. The fourth and fifth operands are the mask and VL
  // operands.
  def riscv_slide1up_vl : RVSDNode<"VSLIDE1UP_VL", SDTRVVSlide1, []>;
  def riscv_slide1down_vl : RVSDNode<"VSLIDE1DOWN_VL", SDTRVVSlide1, []>;

  // Matches the semantics of vfslide1up/vfslide1down. The first operand is
  // the passthru operand, the second is the source vector, and the third is
  // a scalar value whose type matches the element type of the vectors. The
  // fourth and fifth operands are the mask and VL operands.
  def riscv_fslide1up_vl : RVSDNode<"VFSLIDE1UP_VL", SDTRVVFSlide1, []>;
  def riscv_fslide1down_vl : RVSDNode<"VFSLIDE1DOWN_VL", SDTRVVFSlide1, []>;
} // let HasMaskOp = true

foreach vti = AllIntegerVectors in {
  let Predicates = GetVTypePredicates<vti>.Predicates in {
    def : Pat<(vti.Vector (riscv_vid_vl (vti.Mask VMV0:$vm), VLOpFrag)),
              (!cast<Instruction>("PseudoVID_V_"#vti.LMul.MX#"_MASK")
                  (vti.Vector (IMPLICIT_DEF)), (vti.Mask VMV0:$vm), GPR:$vl,
                  vti.Log2SEW, TAIL_AGNOSTIC)>;
  }
}

defm : VPatSlideVL_VX_VI<riscv_slideup_vl, "PseudoVSLIDEUP">;
defm : VPatSlideVL_VX_VI<riscv_slidedown_vl, "PseudoVSLIDEDOWN">;
defm : VPatSlide1VL_VX<riscv_slide1up_vl, "PseudoVSLIDE1UP">;
defm : VPatSlide1VL_VF<riscv_fslide1up_vl, "PseudoVFSLIDE1UP">;
defm : VPatSlide1VL_VX<riscv_slide1down_vl, "PseudoVSLIDE1DOWN">;
defm : VPatSlide1VL_VF<riscv_fslide1down_vl, "PseudoVFSLIDE1DOWN">;
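
// For reference, the slide1 instructions matched above behave as:
//   vslide1up.vx   vd, vs2, rs1: vd[0] = x[rs1]; vd[i] = vs2[i-1] for i > 0
//   vslide1down.vx vd, vs2, rs1: vd[i] = vs2[i+1] for i < vl-1;
//                                vd[vl-1] = x[rs1]
// The vfslide1up/vfslide1down forms are identical but take an FP scalar.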