| author | Fraser Cormack <fraser@codeplay.com> | 2021-06-14 11:00:25 +0100 |
|---|---|---|
| committer | Fraser Cormack <fraser@codeplay.com> | 2021-06-17 10:04:00 +0100 |
| commit | fed1503e855a1e3cf936fa0866f099bf1c8c9416 (patch) | |
| tree | c882486980db0efe6746b6fcbb6461559a38be40 | |
| parent | 05e95d2dd74973dd5163b7d44828fac61e416452 (diff) | |
[RISCV][VP] Lower FP VP ISD nodes to RVV instructions
With the exception of `frem`, this patch supports the current set of VP
floating-point binary intrinsics by lowering them to RVV instructions. It does
so by using the existing `RISCVISD::*_VL` custom nodes as an intermediate
layer. Both scalable and fixed-length vectors are supported using this method.
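For example, one of the masked fixed-length tests added by this patch
(abridged from `fixed-vectors-vfadd-vp.ll` below) shows the resulting codegen:
the EVL operand feeds the `vsetvli` and the mask is applied via `v0.t`:

```llvm
declare <2 x half> @llvm.vp.fadd.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32)

define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
  ret <2 x half> %v
}
```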
The `frem` node is unsupported due to a lack of available instructions. For
fixed-length vectors we could scalarize, but that option is not (currently)
available for scalable-vector types. Support is intentionally left out for
both so that the two kinds of vector type remain equivalent.
The matching of vector/scalar forms is currently lacking, as scalable vector
types do not lower to the custom `VFMV_V_F_VL` node. We could either make
floating-point scalable vector splats lower to this node, or support the
matching of multiple kinds of splat via a `ComplexPattern`, much like we do for
integer types.
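In the masked vector/scalar cases, the scalar operand is currently splatted
with `vfmv.v.f` and the operation stays in `.vv` form rather than folding into
a `.vf` instruction, as the new fixed-length tests below show (abridged from
`fixed-vectors-vfadd-vp.ll`):

```llvm
define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v8, v25, v0.t
; CHECK-NEXT:    ret
  %elt.head = insertelement <2 x half> undef, half %b, i32 0
  %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer
  %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
  ret <2 x half> %v
}
```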
Reviewed By: rogfer01
Differential Revision: https://reviews.llvm.org/D104237
-rw-r--r-- | llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 17
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll | 629
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll | 629
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll | 629
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll | 365
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll | 365
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll | 629
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 815
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll | 815
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll | 815
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll | 485
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll | 485
-rw-r--r-- | llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll | 815
13 files changed, 7493 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 45f7f10..32046a4 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -420,6 +420,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, ISD::VP_SREM, ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::VP_ASHR, ISD::VP_LSHR, ISD::VP_SHL}; + static unsigned FloatingPointVPOps[] = {ISD::VP_FADD, ISD::VP_FSUB, + ISD::VP_FMUL, ISD::VP_FDIV}; + if (!Subtarget.is64Bit()) { // We must custom-lower certain vXi64 operations on RV32 due to the vector // element type being illegal. @@ -603,6 +606,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); setOperationAction(ISD::VECTOR_REVERSE, VT, Custom); + + for (unsigned VPOpc : FloatingPointVPOps) + setOperationAction(VPOpc, VT, Custom); }; // Sets common extload/truncstore actions on RVV floating-point vector @@ -797,6 +803,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM, setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom); setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); + + for (unsigned VPOpc : FloatingPointVPOps) + setOperationAction(VPOpc, VT, Custom); } // Custom-legalize bitcasts from fixed-length vectors to scalar types. @@ -2494,6 +2503,14 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op, return lowerVPOp(Op, DAG, RISCVISD::SRL_VL); case ISD::VP_SHL: return lowerVPOp(Op, DAG, RISCVISD::SHL_VL); + case ISD::VP_FADD: + return lowerVPOp(Op, DAG, RISCVISD::FADD_VL); + case ISD::VP_FSUB: + return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL); + case ISD::VP_FMUL: + return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL); + case ISD::VP_FDIV: + return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL); } } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll new file mode 100644 index 0000000..e6ed948 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll @@ -0,0 +1,629 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fadd.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfadd_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v 
+} + +define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fadd.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfadd_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfadd_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> 
@llvm.vp.fadd.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfadd_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfadd_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fadd.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfadd_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, 
v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfadd_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fadd.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfadd_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfadd_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fadd.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, 
mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfadd_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfadd_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fadd.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfadd_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x 
float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfadd_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fadd.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfadd_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfadd_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fadd.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> 
%v +} + +define <2 x double> @vfadd_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfadd_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fadd.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfadd_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfadd_vf_v4f64_unmasked(<4 x double> %va, 
double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fadd.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfadd_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfadd_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fadd.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfadd_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfadd_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { +; 
CHECK-LABEL: vfadd_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfadd_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfadd_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll new file mode 100644 index 0000000..87aaff0 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll @@ -0,0 +1,629 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfdiv_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vfdiv_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfdiv_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> 
%b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfdiv_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfdiv_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> 
%elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fdiv.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfdiv_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> 
%va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfdiv_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fdiv.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfdiv_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfdiv_vf_v8f32_unmasked(<8 x float> 
%va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fdiv.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfdiv_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fdiv.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfdiv_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { +; 
CHECK-LABEL: vfdiv_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fdiv.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfdiv_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli 
zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfdiv_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fdiv.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfdiv_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfdiv_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; 
CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll new file mode 100644 index 0000000..259fbfb --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll @@ -0,0 +1,629 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fmul.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfmul_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, 
fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fmul.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfmul_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfmul_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fmul.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfmul_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfmul_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fmul.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfmul_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 
x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfmul_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fmul.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfmul_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfmul_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fmul.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfmul_vv_v4f32_unmasked(<4 x 
float> %va, <4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfmul_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fmul.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfmul_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfmul_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, 
a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fmul.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfmul_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfmul_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fmul.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfmul_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv 
v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfmul_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fmul.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfmul_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfmul_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, 
double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fmul.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfmul_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfmul_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fmul.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfmul_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfmul_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> 
%head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfmul_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfmul_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll new file mode 100644 index 0000000..5745e03 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrdiv-vp.ll @@ -0,0 +1,365 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fdiv.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfrdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfrdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl) + ret 
<2 x half> %v +} + +declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfrdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfrdiv_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fdiv.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfrdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfrdiv_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fdiv.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfrdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfrdiv_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: 
vfrdiv_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fdiv.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfrdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfrdiv_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fdiv.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfrdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfrdiv_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fdiv.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfrdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f32: +; CHECK: 
# %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfrdiv_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fdiv.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfrdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfrdiv_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fdiv.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfrdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfrdiv_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + 
%elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fdiv.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfrdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfrdiv_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fdiv.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfrdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfrdiv_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fdiv.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfrdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, 
fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfrdiv_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrdiv.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll new file mode 100644 index 0000000..8762766 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfrsub-vp.ll @@ -0,0 +1,365 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfrsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfrsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %vb, <2 x half> %va, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfrsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, 
v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfrsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %vb, <4 x half> %va, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfrsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfrsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %vb, <8 x half> %va, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfrsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfrsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x 
i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %vb, <16 x half> %va, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfrsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfrsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %vb, <2 x float> %va, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfrsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfrsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %vb, <4 x float> %va, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfrsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %vb, <8 x 
float> %va, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfrsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %vb, <8 x float> %va, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfrsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfrsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %vb, <16 x float> %va, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfrsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfrsub_vf_v2f64_unmasked(<2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %vb, <2 x double> %va, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + 
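Every vfrsub_vf test in this file builds the reversed-operand form the same way: the scalar %b is splatted and passed as the first operand of llvm.vp.fsub, so each active lane computes b - va[i]; per the CHECK lines, the masked form currently lowers through vfmv.v.f plus vfsub.vv, while the unmasked form is matched directly to vfrsub.vf. Below is a minimal sketch of that pattern; the function name @example_vfrsub is hypothetical and the snippet is illustrative only, not part of the committed tests.

; Illustrative sketch (hypothetical, not part of this patch): the
; reversed-operand pattern exercised by the vfrsub_vf tests in this file.
define <4 x double> @example_vfrsub(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
  ; Splat the scalar operand into a vector ...
  %elt.head = insertelement <4 x double> undef, double %b, i32 0
  %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer
  ; ... and subtract %va from the splat, so each active lane yields b - va[i].
  %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl)
  ret <4 x double> %v
}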
+declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfrsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfrsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %vb, <4 x double> %va, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfrsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfrsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %vb, <8 x double> %va, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfrsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define 
<16 x double> @vfrsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %vb, <16 x double> %va, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll new file mode 100644 index 0000000..ee04b73 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll @@ -0,0 +1,629 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fsub.v2f16(<2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfsub_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> undef, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x 
half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfsub_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfsub_vf_v4f16_unmasked(<4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> undef, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fsub.v8f16(<8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfsub_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, 
a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfsub_vf_v8f16_unmasked(<8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> undef, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fsub.v16f16(<16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfsub_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfsub_vf_v16f16_unmasked(<16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> undef, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fsub.v2f32(<2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f32: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfsub_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfsub_vf_v2f32_unmasked(<2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> undef, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fsub.v4f32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfsub_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %v = call 
<4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfsub_vf_v4f32_unmasked(<4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> undef, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> @llvm.vp.fsub.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfsub_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfsub_vf_v8f32_unmasked(<8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> undef, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fsub.v16f32(<16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v 
+} + +define <16 x float> @vfsub_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfsub_vf_v16f32_unmasked(<16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> undef, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fsub.v2f64(<2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfsub_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfsub_vf_v2f64_unmasked(<2 
x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> undef, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> undef, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> undef, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> undef, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fsub.v4f64(<4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfsub_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfsub_vf_v4f64_unmasked(<4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> undef, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> undef, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> undef, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> undef, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fsub.v8f64(<8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfsub_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: 
vfsub_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfsub_vf_v8f64_unmasked(<8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> undef, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> undef, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> undef, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> undef, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <16 x double> @llvm.vp.fsub.v16f64(<16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfsub_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfsub_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfsub_vf_v16f64_unmasked(<16 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_v16f64_unmasked: 
+; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> undef, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> undef, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> undef, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> undef, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll new file mode 100644 index 0000000..2f33166 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll @@ -0,0 +1,815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> 
%elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +declare <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +declare <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f16: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +declare <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer 
+ %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +declare <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale 
x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +declare <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32) + +define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 
32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +declare <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x float> @vfadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfadd_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, <vscale x 1 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfadd_vf_nxv1f32_unmasked(<vscale x 1 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +declare <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x float> @vfadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 zeroext 
%evl) { +; CHECK-LABEL: vfadd_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfadd_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, <vscale x 2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfadd_vf_nxv2f32_unmasked(<vscale x 2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +declare <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x float> @vfadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfadd_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, <vscale x 4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, 
i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfadd_vf_nxv4f32_unmasked(<vscale x 4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +declare <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x float> @vfadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfadd_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, <vscale x 8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 
x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfadd_vf_nxv8f32_unmasked(<vscale x 8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +declare <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x float> @vfadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfadd_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, <vscale x 16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfadd_vf_nxv16f32_unmasked(<vscale x 16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: 
vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +declare <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x double> @vfadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfadd_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, <vscale x 1 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfadd_vf_nxv1f64_unmasked(<vscale x 1 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +declare <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x 
double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x double> @vfadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfadd_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, <vscale x 2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfadd_vf_nxv2f64_unmasked(<vscale x 2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +declare <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x double> @vfadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfadd_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, <vscale x 4 x double> %b, i32 
zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfadd_vf_nxv4f64_unmasked(<vscale x 4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +declare <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfadd_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, <vscale x 8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfadd_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) 
{ +; CHECK-LABEL: vfadd_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfadd_vf_nxv8f64_unmasked(<vscale x 8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll new file mode 100644 index 0000000..c5f219c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll @@ -0,0 +1,815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x half> @vfdiv_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfdiv_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; 
CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfdiv_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +declare <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x half> @vfdiv_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfdiv_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfdiv_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; 
CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +declare <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x half> @vfdiv_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfdiv_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfdiv_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +declare <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half>, 
<vscale x 8 x half>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x half> @vfdiv_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfdiv_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfdiv_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +declare <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x half> @vfdiv_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfdiv_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfdiv_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +declare <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32) + +define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, 
m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +declare <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x float> @vfdiv_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfdiv_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, <vscale x 1 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfdiv_vf_nxv1f32_unmasked(<vscale x 1 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f32_unmasked: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +declare <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x float> @vfdiv_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfdiv_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, <vscale x 2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfdiv_vf_nxv2f32_unmasked(<vscale x 2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl) + 
ret <vscale x 2 x float> %v +} + +declare <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x float> @vfdiv_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfdiv_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, <vscale x 4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfdiv_vf_nxv4f32_unmasked(<vscale x 4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +declare <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x float> @vfdiv_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfdiv_vv_nxv8f32_unmasked(<vscale 
x 8 x float> %va, <vscale x 8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfdiv_vf_nxv8f32_unmasked(<vscale x 8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +declare <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x float> @vfdiv_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfdiv_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, <vscale x 16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b, 
<vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfdiv_vf_nxv16f32_unmasked(<vscale x 16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +declare <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x double> @vfdiv_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfdiv_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, <vscale x 1 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x 
double> %v +} + +define <vscale x 1 x double> @vfdiv_vf_nxv1f64_unmasked(<vscale x 1 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +declare <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x double> @vfdiv_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfdiv_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, <vscale x 2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfdiv_vf_nxv2f64_unmasked(<vscale x 2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = 
shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +declare <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x double> @vfdiv_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfdiv_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, <vscale x 4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfdiv_vf_nxv4f64_unmasked(<vscale x 4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +declare <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; 
CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfdiv_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, <vscale x 8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfdiv_vf_nxv8f64_unmasked(<vscale x 8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll new file mode 100644 index 0000000..f33fd4a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll @@ -0,0 +1,815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x half> @vfmul_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x 
half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfmul_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfmul_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +declare <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x half> @vfmul_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfmul_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, 
i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfmul_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +declare <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x half> @vfmul_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfmul_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) 
+ ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfmul_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +declare <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x half> @vfmul_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfmul_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfmul_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> 
zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +declare <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x half> @vfmul_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfmul_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfmul_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +declare <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32) + +define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 32 x half> 
@llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +declare <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x float> @vfmul_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfmul_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, <vscale x 1 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x 
float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfmul_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfmul_vf_nxv1f32_unmasked(<vscale x 1 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +declare <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x float> @vfmul_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfmul_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, <vscale x 2 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfmul_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x 
float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfmul_vf_nxv2f32_unmasked(<vscale x 2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +declare <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x float> @vfmul_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfmul_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, <vscale x 4 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfmul_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfmul_vf_nxv4f32_unmasked(<vscale x 4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> 
zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +declare <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x float> @vfmul_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfmul_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, <vscale x 8 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfmul_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfmul_vf_nxv8f32_unmasked(<vscale x 8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +declare <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x float> @vfmul_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv16f32: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfmul_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, <vscale x 16 x float> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfmul_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfmul_vf_nxv16f32_unmasked(<vscale x 16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +declare <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x double> @vfmul_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfmul_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, <vscale x 1 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 
true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfmul_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfmul_vf_nxv1f64_unmasked(<vscale x 1 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +declare <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x double> @vfmul_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfmul_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, <vscale x 2 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfmul_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26, v0.t +; 
CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfmul_vf_nxv2f64_unmasked(<vscale x 2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +declare <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x double> @vfmul_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfmul_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, <vscale x 4 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfmul_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfmul_vf_nxv4f64_unmasked(<vscale x 4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; 
CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +declare <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfmul_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, <vscale x 8 x double> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfmul_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfmul_vf_nxv8f64_unmasked(<vscale x 8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll new file mode 100644 index 0000000..8aa3875 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-vp.ll @@ -0,0 +1,485 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x half> @vfrdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfrdiv_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +declare <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x half> @vfrdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfrdiv_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 
x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +declare <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x half> @vfrdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfrdiv_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +declare <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x half> @vfrdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfrdiv_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x 
half> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +declare <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x half> @vfrdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfrdiv_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +declare <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32) + +define <vscale x 32 x half> @vfrdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfrdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> 
zeroinitializer + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +declare <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x float> @vfrdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfrdiv_vf_nxv1f32_unmasked(<vscale x 1 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +declare <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x float> @vfrdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfrdiv_vf_nxv2f32_unmasked(<vscale x 2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement 
<vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +declare <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x float> @vfrdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfrdiv_vf_nxv4f32_unmasked(<vscale x 4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +declare <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x float> @vfrdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfrdiv_vf_nxv8f32_unmasked(<vscale x 8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = 
shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +declare <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x float> @vfrdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfrdiv_vf_nxv16f32_unmasked(<vscale x 16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +declare <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x double> @vfrdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfrdiv_vf_nxv1f64_unmasked(<vscale x 1 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 
0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +declare <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x double> @vfrdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfrdiv_vf_nxv2f64_unmasked(<vscale x 2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +declare <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x double> @vfrdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfrdiv_vf_nxv4f64_unmasked(<vscale x 4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 
0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +declare <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x double> @vfrdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfrdiv_vf_nxv8f64_unmasked(<vscale x 8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrdiv_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll new file mode 100644 index 0000000..13f5cd7 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-vp.ll @@ -0,0 +1,485 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x half> @vfrsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> 
@vfrsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +declare <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x half> @vfrsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfrsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +declare <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x half> @vfrsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfrsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, i32 zeroext %evl) { +; 
CHECK-LABEL: vfrsub_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +declare <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x half> @vfrsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfrsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +declare <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x half> @vfrsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfrsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +declare <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32) + +define <vscale x 32 x half> @vfrsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfrsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %vb, <vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +declare <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x float> @vfrsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +define <vscale x 1 x float> @vfrsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, 
ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x float> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x float> %v +} + +declare <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x float> @vfrsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +define <vscale x 2 x float> @vfrsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x float> %v +} + +declare <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x float> @vfrsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +define <vscale x 4 x float> @vfrsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; 
CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x float> %v +} + +declare <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x float> @vfrsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +define <vscale x 8 x float> @vfrsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x float> %v +} + +declare <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x float> @vfrsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +define <vscale x 16 x float> @vfrsub_vf_nxv16f32_unmasked(<vscale x 16 x float> %va, float %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, 
a0, e32, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0 + %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %vb, <vscale x 16 x float> %va, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x float> %v +} + +declare <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x double> @vfrsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +define <vscale x 1 x double> @vfrsub_vf_nxv1f64_unmasked(<vscale x 1 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v25, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %vb, <vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x double> %v +} + +declare <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x double> @vfrsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +define <vscale x 2 x double> @vfrsub_vf_nxv2f64_unmasked(<vscale x 2 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli 
zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v26, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %vb, <vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x double> %v +} + +declare <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x double> @vfrsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +define <vscale x 4 x double> @vfrsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v28, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %vb, <vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x double> %v +} + +declare <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x double> @vfrsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} + +define <vscale x 8 x double> @vfrsub_vf_nxv8f64_unmasked(<vscale x 8 x double> %va, double %b, i32 zeroext %evl) { +; CHECK-LABEL: vfrsub_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli 
zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v8 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0 + %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x double> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll new file mode 100644 index 0000000..cfcfdf8 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll @@ -0,0 +1,815 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, i32) + +define <vscale x 1 x half> @vfsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <vscale x 1 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +define <vscale x 1 x half> @vfsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 1 x half> 
undef, half %b, i32 0 + %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> undef, <vscale x 1 x i32> zeroinitializer + %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer + %v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl) + ret <vscale x 1 x half> %v +} + +declare <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x i1>, i32) + +define <vscale x 2 x half> @vfsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <vscale x 2 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +define <vscale x 2 x half> @vfsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 2 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> undef, <vscale x 2 x i32> zeroinitializer + %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer + %v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl) + ret <vscale x 2 x half> %v +} + +declare <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x i1>, i32) + +define <vscale x 4 x half> @vfsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 zeroext 
%evl) { +; CHECK-LABEL: vfsub_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <vscale x 4 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +define <vscale x 4 x half> @vfsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu +; CHECK-NEXT: vfmv.v.f v25, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v25 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 4 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> undef, <vscale x 4 x i32> zeroinitializer + %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer + %v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl) + ret <vscale x 4 x half> %v +} + +declare <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, i32) + +define <vscale x 8 x half> @vfsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <vscale x 8 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> 
%head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +define <vscale x 8 x half> @vfsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu +; CHECK-NEXT: vfmv.v.f v26, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v26 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 8 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> undef, <vscale x 8 x i32> zeroinitializer + %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer + %v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl) + ret <vscale x 8 x half> %v +} + +declare <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x i1>, i32) + +define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, <vscale x 16 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector 
<vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +define <vscale x 16 x half> @vfsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu +; CHECK-NEXT: vfmv.v.f v28, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v28 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 16 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> undef, <vscale x 16 x i32> zeroinitializer + %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer + %v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl) + ret <vscale x 16 x half> %v +} + +declare <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x i1>, i32) + +define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, <vscale x 32 x half> %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0 + %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <vscale x 32 x half> undef, half %b, i32 0 + %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer + %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl) + ret <vscale x 32 x half> %v +} + +define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va, half %b, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu +; CHECK-NEXT: vfmv.v.f v16, fa0 +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <vscale 
x 32 x half> undef, half %b, i32 0
+ %vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> undef, <vscale x 32 x i32> zeroinitializer
+ %head = insertelement <vscale x 32 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x half> %v
+}
+
+declare <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @vfsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x float> %v
+}
+
+define <vscale x 1 x float> @vfsub_vv_nxv1f32_unmasked(<vscale x 1 x float> %va, <vscale x 1 x float> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv1f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x float> %v
+}
+
+define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfmv.v.f v25, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x float> %v
+}
+
+define <vscale x 1 x float> @vfsub_vf_nxv1f32_unmasked(<vscale x 1 x float> %va, float %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv1f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vfmv.v.f v25, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v25
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x float> %v
+}
+
+declare <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @vfsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vfsub_vv_nxv2f32_unmasked(<vscale x 2 x float> %va, <vscale x 2 x float> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv2f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv2f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfmv.v.f v25, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x float> %v
+}
+
+define <vscale x 2 x float> @vfsub_vf_nxv2f32_unmasked(<vscale x 2 x float> %va, float %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv2f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfmv.v.f v25, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v25
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x float> %v
+}
+
+declare <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @vfsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x float> %v
+}
+
+define <vscale x 4 x float> @vfsub_vv_nxv4f32_unmasked(<vscale x 4 x float> %va, <vscale x 4 x float> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv4f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x float> %v
+}
+
+define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv4f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfmv.v.f v26, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x float> %v
+}
+
+define <vscale x 4 x float> @vfsub_vf_nxv4f32_unmasked(<vscale x 4 x float> %va, float %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv4f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT: vfmv.v.f v26, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v26
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x float> %v
+}
+
+declare <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x float> @vfsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vfsub_vv_nxv8f32_unmasked(<vscale x 8 x float> %va, <vscale x 8 x float> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv8f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv8f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfmv.v.f v28, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x float> %v
+}
+
+define <vscale x 8 x float> @vfsub_vf_nxv8f32_unmasked(<vscale x 8 x float> %va, float %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv8f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT: vfmv.v.f v28, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v28
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x float> %v
+}
+
+declare <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x float> @vfsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x float> %v
+}
+
+define <vscale x 16 x float> @vfsub_vv_nxv16f32_unmasked(<vscale x 16 x float> %va, <vscale x 16 x float> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv16f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x float> %v
+}
+
+define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv16f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
+; CHECK-NEXT: vfmv.v.f v16, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x float> %v
+}
+
+define <vscale x 16 x float> @vfsub_vf_nxv16f32_unmasked(<vscale x 16 x float> %va, float %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv16f32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
+; CHECK-NEXT: vfmv.v.f v16, fa0
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 16 x float> undef, float %b, i32 0
+ %vb = shufflevector <vscale x 16 x float> %elt.head, <vscale x 16 x float> undef, <vscale x 16 x i32> zeroinitializer
+ %head = insertelement <vscale x 16 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x float> %v
+}
+
+declare <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @vfsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x double> %v
+}
+
+define <vscale x 1 x double> @vfsub_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, <vscale x 1 x double> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv1f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x double> %v
+}
+
+define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv1f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vfmv.v.f v25, fa0
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v25, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+ %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x double> %v
+}
+
+define <vscale x 1 x double> @vfsub_vf_nxv1f64_unmasked(<vscale x 1 x double> %va, double %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv1f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vfmv.v.f v25, fa0
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v25
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 1 x double> undef, double %b, i32 0
+ %vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> undef, <vscale x 1 x i32> zeroinitializer
+ %head = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x double> %v
+}
+
+declare <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @vfsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vfsub_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, <vscale x 2 x double> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv2f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v10
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv2f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK-NEXT: vfmv.v.f v26, fa0
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v26, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+ %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x double> %v
+}
+
+define <vscale x 2 x double> @vfsub_vf_nxv2f64_unmasked(<vscale x 2 x double> %va, double %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv2f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK-NEXT: vfmv.v.f v26, fa0
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v26
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 2 x double> undef, double %b, i32 0
+ %vb = shufflevector <vscale x 2 x double> %elt.head, <vscale x 2 x double> undef, <vscale x 2 x i32> zeroinitializer
+ %head = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x double> %v
+}
+
+declare <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x double> @vfsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x double> %v
+}
+
+define <vscale x 4 x double> @vfsub_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, <vscale x 4 x double> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv4f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x double> %v
+}
+
+define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv4f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK-NEXT: vfmv.v.f v28, fa0
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v28, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+ %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x double> %v
+}
+
+define <vscale x 4 x double> @vfsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %va, double %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv4f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK-NEXT: vfmv.v.f v28, fa0
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v28
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 4 x double> undef, double %b, i32 0
+ %vb = shufflevector <vscale x 4 x double> %elt.head, <vscale x 4 x double> undef, <vscale x 4 x i32> zeroinitializer
+ %head = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x double> %v
+}
+
+declare <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vfsub_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, <vscale x 8 x double> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv8f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv8f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfmv.v.f v16, fa0
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+ %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x double> %v
+}
+
+define <vscale x 8 x double> @vfsub_vf_nxv8f64_unmasked(<vscale x 8 x double> %va, double %b, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vf_nxv8f64_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfmv.v.f v16, fa0
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 8 x double> undef, double %b, i32 0
+ %vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> undef, <vscale x 8 x i32> zeroinitializer
+ %head = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x double> %v
+}