Diffstat (limited to 'llvm/test/CodeGen/RISCV')
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll                  1575
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll                   139
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll                 1724
-rw-r--r--  llvm/test/CodeGen/RISCV/features-info.ll                          1
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-stackmap.ll                          8
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll      253
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/pr165232.ll                         244
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir                    12
-rw-r--r--  llvm/test/CodeGen/RISCV/short-forward-branch-opt-min-max.ll     703
9 files changed, 4649 insertions, 10 deletions
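The new test files below exercise the RVV unit-stride store (vse), strided store (vsse), and mask store (vsm) intrinsics through GlobalISel. For orientation only, here is a minimal C-level sketch of how these IR intrinsics are typically reached from source, assuming the standard RVV C intrinsics in <riscv_vector.h>; the example is illustrative and not part of this commit:

    #include <riscv_vector.h>
    #include <stddef.h>

    void store_examples(int64_t *base, ptrdiff_t stride, vint64m1_t val,
                        vbool64_t mask, uint8_t *mask_mem, vbool1_t m, size_t vl) {
      __riscv_vse64_v_i64m1(base, val, vl);          /* unit-stride store -> @llvm.riscv.vse / vse64.v */
      __riscv_vse64_v_i64m1_m(mask, base, val, vl);  /* masked form -> @llvm.riscv.vse.mask, v0.t operand */
      __riscv_vsse64_v_i64m1(base, stride, val, vl); /* strided store -> @llvm.riscv.vsse / vsse64.v */
      __riscv_vsm_v_b1(mask_mem, m, vl);             /* mask-register store -> @llvm.riscv.vsm / vsm.v */
    }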
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll
new file mode 100644
index 0000000..785d9fc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vse.ll
@@ -0,0 +1,1575 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare void @llvm.riscv.vse.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+define void @intrinsic_vse_allonesmask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_allonesmask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i1> splat (i1 true),
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f64(
+ <vscale x 1 x double>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv1f64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f64(
+ <vscale x 2 x double>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv2f64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv2f64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f64(
+ <vscale x 4 x double>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv4f64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv4f64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f64(
+ <vscale x 8 x double>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv8f64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv8f64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f32(
+ <vscale x 1 x float>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv1f32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv1f32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f32(
+ <vscale x 2 x float>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv2f32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv2f32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f32(
+ <vscale x 4 x float>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv4f32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv4f32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f32(
+ <vscale x 8 x float>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv8f32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv8f32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16f32(
+ <vscale x 16 x float>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv16f32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16f32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vse32.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv16f32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f16(
+ <vscale x 1 x half>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv1f16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f16(
+ <vscale x 2 x half>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv2f16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv2f16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f16(
+ <vscale x 4 x half>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv4f16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv4f16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f16(
+ <vscale x 8 x half>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv8f16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv8f16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16f16(
+ <vscale x 16 x half>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv16f16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16f16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv16f16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32f16(
+ <vscale x 32 x half>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv32f16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32f16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vse16.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv32f16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ iXLen);
+
+define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ iXLen %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i1>,
+ iXLen);
+
+define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vse8.v v8, (a0), v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vse.mask.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ <vscale x 64 x i1> %2,
+ iXLen %3)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll
new file mode 100644
index 0000000..5237536
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsm.ll
@@ -0,0 +1,139 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -global-isel -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -global-isel -verify-machineinstrs | FileCheck %s
+
+declare void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv1i1(<vscale x 1 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %0, ptr %1, iXLen %2)
+ ret void
+}
+
+declare void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv2i1(<vscale x 2 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsm.nxv2i1(<vscale x 2 x i1> %0, ptr %1, iXLen %2)
+ ret void
+}
+
+declare void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv4i1(<vscale x 4 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsm.nxv4i1(<vscale x 4 x i1> %0, ptr %1, iXLen %2)
+ ret void
+}
+
+declare void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv8i1(<vscale x 8 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsm.nxv8i1(<vscale x 8 x i1> %0, ptr %1, iXLen %2)
+ ret void
+}
+
+declare void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv16i1(<vscale x 16 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsm.nxv16i1(<vscale x 16 x i1> %0, ptr %1, iXLen %2)
+ ret void
+}
+
+declare void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv32i1(<vscale x 32 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsm.nxv32i1(<vscale x 32 x i1> %0, ptr %1, iXLen %2)
+ ret void
+}
+
+declare void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1>, ptr, iXLen);
+
+define void @intrinsic_vsm_v_nxv64i1(<vscale x 64 x i1> %0, ptr %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vsm_v_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsm.v v0, (a0)
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsm.nxv64i1(<vscale x 64 x i1> %0, ptr %1, iXLen %2)
+ ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ iXLen);
+
+; Make sure we can use the vsetvli from the producing instruction.
+define void @test_vsetvli_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, ptr %2, iXLen %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ iXLen %3)
+ call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, ptr %2, iXLen %3)
+ ret void
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @test_vsetvli_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, ptr %2, iXLen %3) nounwind {
+; CHECK-LABEL: test_vsetvli_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ iXLen %3)
+ call void @llvm.riscv.vsm.nxv1i1(<vscale x 1 x i1> %a, ptr %2, iXLen %3)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll
new file mode 100644
index 0000000..b7609ff
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsse.ll
@@ -0,0 +1,1724 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare void @llvm.riscv.vsse.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+define void @intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_allonesmask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 1 x i1> splat (i1 true),
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ iXLen,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ iXLen,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ iXLen,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f64(
+ <vscale x 1 x double>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv1f64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f64(
+ <vscale x 1 x double>,
+ ptr,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f64(
+ <vscale x 2 x double>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv2f64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f64(
+ <vscale x 2 x double>,
+ ptr,
+ iXLen,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv2f64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f64(
+ <vscale x 4 x double>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv4f64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f64(
+ <vscale x 4 x double>,
+ ptr,
+ iXLen,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv4f64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f64(
+ <vscale x 8 x double>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv8f64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f64(
+ <vscale x 8 x double>,
+ ptr,
+ iXLen,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vsse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv8f64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ iXLen,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ iXLen,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ iXLen,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ iXLen,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f32(
+ <vscale x 1 x float>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv1f32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f32(
+ <vscale x 1 x float>,
+ ptr,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv1f32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f32(
+ <vscale x 2 x float>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv2f32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f32(
+ <vscale x 2 x float>,
+ ptr,
+ iXLen,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv2f32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f32(
+ <vscale x 4 x float>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv4f32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f32(
+ <vscale x 4 x float>,
+ ptr,
+ iXLen,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv4f32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f32(
+ <vscale x 8 x float>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv8f32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f32(
+ <vscale x 8 x float>,
+ ptr,
+ iXLen,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv8f32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16f32(
+ <vscale x 16 x float>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv16f32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16f32(
+ <vscale x 16 x float>,
+ ptr,
+ iXLen,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vsse32.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv16f32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ iXLen,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ iXLen,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ iXLen,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ iXLen,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ iXLen,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f16(
+ <vscale x 1 x half>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv1f16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f16(
+ <vscale x 1 x half>,
+ ptr,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f16(
+ <vscale x 2 x half>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv2f16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f16(
+ <vscale x 2 x half>,
+ ptr,
+ iXLen,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv2f16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f16(
+ <vscale x 4 x half>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv4f16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f16(
+ <vscale x 4 x half>,
+ ptr,
+ iXLen,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv4f16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f16(
+ <vscale x 8 x half>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv8f16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f16(
+ <vscale x 8 x half>,
+ ptr,
+ iXLen,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv8f16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16f16(
+ <vscale x 16 x half>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv16f16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16f16(
+ <vscale x 16 x half>,
+ ptr,
+ iXLen,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv16f16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32f16(
+ <vscale x 32 x half>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv32f16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32f16(
+ <vscale x 32 x half>,
+ ptr,
+ iXLen,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, ma
+; CHECK-NEXT: vsse16.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv32f16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ iXLen,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ iXLen,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ iXLen,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ iXLen,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ iXLen,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ iXLen,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ iXLen,
+ iXLen);
+
+define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ iXLen,
+ <vscale x 64 x i1>,
+ iXLen);
+
+define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsse.mask.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ iXLen %2,
+ <vscale x 64 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 37e11db..988d049 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -136,6 +136,7 @@
; CHECK-NEXT: shgatpa - 'Shgatpa' (SvNNx4 mode supported for all modes supported by satp, as well as Bare).
; CHECK-NEXT: shifted-zextw-fusion - Enable SLLI+SRLI to be fused when computing (shifted) word zero extension.
; CHECK-NEXT: shlcofideleg - 'Shlcofideleg' (Delegating LCOFI Interrupts to VS-mode).
+; CHECK-NEXT: short-forward-branch-i-minmax - Enable short forward branch optimization for min,max instructions in Zbb.
; CHECK-NEXT: short-forward-branch-opt - Enable short forward branch optimization.
; CHECK-NEXT: shtvala - 'Shtvala' (htval provides all needed values).
; CHECK-NEXT: shvsatpa - 'Shvsatpa' (vsatp supports all modes supported by satp).
diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
index c3183a1..9aefa90 100644
--- a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
@@ -38,8 +38,8 @@
; CHECK-NEXT: .quad liveConstant
; CHECK-NEXT: .quad 0
; CHECK-NEXT: .quad 1
-; CHECK-NEXT: .quad spilledValue
-; CHECK-NEXT: .quad 144
+; CHECK-NEXT: .quad liveArgs
+; CHECK-NEXT: .quad 0
; CHECK-NEXT: .quad 1
; CHECK-NEXT: .quad directFrameIdx
; CHECK-NEXT: .quad 48
@@ -278,7 +278,7 @@ define void @liveConstant() {
;
; Verify 28 stack map entries.
;
-; CHECK-LABEL: .word .L{{.*}}-spilledValue
+; CHECK-LABEL: .word .L{{.*}}-liveArgs
; CHECK-NEXT: .half 0
; CHECK-NEXT: .half 28
;
@@ -290,7 +290,7 @@ define void @liveConstant() {
; CHECK-NEXT: .half 2
; CHECK-NEXT: .half 0
; CHECK-NEXT: .word
-define void @spilledValue(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i8 %l25, i16 zeroext %l26, i32 signext %l27) {
+define void @liveArgs(i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i8 %l25, i16 zeroext %l26, i32 signext %l27) {
entry:
call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, ptr null, i32 5, i64 %arg0, i64 %arg1, i64 %arg2, i64 %arg3, i64 %arg4, i64 %l0, i64 %l1, i64 %l2, i64 %l3, i64 %l4, i64 %l5, i64 %l6, i64 %l7, i64 %l8, i64 %l9, i64 %l10, i64 %l11, i64 %l12, i64 %l13, i64 %l14, i64 %l15, i64 %l16, i64 %l17, i64 %l18, i64 %l19, i64 %l20, i64 %l21, i64 %l22, i64 %l23, i64 %l24, i8 %l25, i16 %l26, i32 %l27)
ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 4c35b25..7e6f2c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -15265,6 +15265,259 @@ define <4 x i32> @masked_gather_widen_sew_negative_stride(ptr %base) {
ret <4 x i32> %x
}
+define <7 x i8> @mgather_baseidx_v7i8(ptr %base, <7 x i8> %idxs, <7 x i1> %m, <7 x i8> %passthru) {
+; RV32-LABEL: mgather_baseidx_v7i8:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 127
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.s.x v10, a1
+; RV32-NEXT: vmand.mm v0, v0, v10
+; RV32-NEXT: vsext.vf4 v10, v8
+; RV32-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
+; RV32-NEXT: vluxei32.v v9, (a0), v10, v0.t
+; RV32-NEXT: vmv1r.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64V-LABEL: mgather_baseidx_v7i8:
+; RV64V: # %bb.0:
+; RV64V-NEXT: li a1, 127
+; RV64V-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64V-NEXT: vmv.s.x v10, a1
+; RV64V-NEXT: vmand.mm v0, v0, v10
+; RV64V-NEXT: vsext.vf8 v12, v8
+; RV64V-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
+; RV64V-NEXT: vluxei64.v v9, (a0), v12, v0.t
+; RV64V-NEXT: vmv1r.v v8, v9
+; RV64V-NEXT: ret
+;
+; RV64ZVE32F-LABEL: mgather_baseidx_v7i8:
+; RV64ZVE32F: # %bb.0:
+; RV64ZVE32F-NEXT: addi sp, sp, -16
+; RV64ZVE32F-NEXT: .cfi_def_cfa_offset 16
+; RV64ZVE32F-NEXT: .cfi_remember_state
+; RV64ZVE32F-NEXT: li a1, 64
+; RV64ZVE32F-NEXT: addi a2, sp, 8
+; RV64ZVE32F-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64ZVE32F-NEXT: vsm.v v0, (a2)
+; RV64ZVE32F-NEXT: ld a1, 8(sp)
+; RV64ZVE32F-NEXT: andi a2, a1, 1
+; RV64ZVE32F-NEXT: beqz a2, .LBB132_2
+; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1
+; RV64ZVE32F-NEXT: vmv.v.x v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 3
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 5
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 6
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vslide1down.vx v9, v10, a2
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
+; RV64ZVE32F-NEXT: .LBB132_2: # %else
+; RV64ZVE32F-NEXT: andi a2, a1, 2
+; RV64ZVE32F-NEXT: beqz a2, .LBB132_4
+; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 2
+; RV64ZVE32F-NEXT: vmv.x.s a3, v10
+; RV64ZVE32F-NEXT: add a3, a0, a3
+; RV64ZVE32F-NEXT: lbu a3, 0(a3)
+; RV64ZVE32F-NEXT: vmv.v.x v10, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v11
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 3
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a3
+; RV64ZVE32F-NEXT: vmv.x.s a3, v11
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 4
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v11
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 5
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 6
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a3
+; RV64ZVE32F-NEXT: vmv.x.s a3, v11
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a3
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vslide1down.vx v9, v10, a2
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
+; RV64ZVE32F-NEXT: .LBB132_4: # %else2
+; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 2
+; RV64ZVE32F-NEXT: beqz a2, .LBB132_6
+; RV64ZVE32F-NEXT: # %bb.5: # %cond.load4
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 1
+; RV64ZVE32F-NEXT: vmv.x.s a3, v9
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v9, 3
+; RV64ZVE32F-NEXT: vmv.x.s a4, v11
+; RV64ZVE32F-NEXT: vmv.v.x v11, a3
+; RV64ZVE32F-NEXT: vmv.x.s a3, v12
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v9, 4
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a4
+; RV64ZVE32F-NEXT: vmv.x.s a4, v12
+; RV64ZVE32F-NEXT: vslidedown.vi v12, v9, 5
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 6
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v12
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a3
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a4
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vslide1down.vx v9, v11, a2
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
+; RV64ZVE32F-NEXT: .LBB132_6: # %else5
+; RV64ZVE32F-NEXT: andi a2, a1, 8
+; RV64ZVE32F-NEXT: beqz a2, .LBB132_8
+; RV64ZVE32F-NEXT: # %bb.7: # %cond.load7
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 1
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vmv.x.s a3, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2
+; RV64ZVE32F-NEXT: vmv.x.s a4, v11
+; RV64ZVE32F-NEXT: vmv.v.x v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a4
+; RV64ZVE32F-NEXT: vmv.x.s a4, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 5
+; RV64ZVE32F-NEXT: add a3, a0, a3
+; RV64ZVE32F-NEXT: lbu a3, 0(a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 6
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v11, a3
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a4
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vslide1down.vx v9, v10, a2
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
+; RV64ZVE32F-NEXT: .LBB132_8: # %else8
+; RV64ZVE32F-NEXT: andi a2, a1, 16
+; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 4
+; RV64ZVE32F-NEXT: bnez a2, .LBB132_13
+; RV64ZVE32F-NEXT: # %bb.9: # %else11
+; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: bnez a2, .LBB132_14
+; RV64ZVE32F-NEXT: .LBB132_10: # %else14
+; RV64ZVE32F-NEXT: andi a1, a1, 64
+; RV64ZVE32F-NEXT: beqz a1, .LBB132_12
+; RV64ZVE32F-NEXT: .LBB132_11: # %cond.load16
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v9
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: vmv.v.x v8, a1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2
+; RV64ZVE32F-NEXT: add a0, a0, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 3
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT: vmv.x.s a1, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 5
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT: vmv.x.s a1, v9
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
+; RV64ZVE32F-NEXT: .LBB132_12: # %else17
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; RV64ZVE32F-NEXT: vmv1r.v v8, v9
+; RV64ZVE32F-NEXT: addi sp, sp, 16
+; RV64ZVE32F-NEXT: .cfi_def_cfa_offset 0
+; RV64ZVE32F-NEXT: ret
+; RV64ZVE32F-NEXT: .LBB132_13: # %cond.load10
+; RV64ZVE32F-NEXT: .cfi_restore_state
+; RV64ZVE32F-NEXT: vmv.x.s a2, v8
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 1
+; RV64ZVE32F-NEXT: vmv.x.s a3, v9
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 2
+; RV64ZVE32F-NEXT: vmv.x.s a4, v10
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vmv.v.x v10, a3
+; RV64ZVE32F-NEXT: vmv.x.s a3, v11
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 3
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a4
+; RV64ZVE32F-NEXT: vmv.x.s a4, v11
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 5
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 6
+; RV64ZVE32F-NEXT: add a2, a0, a2
+; RV64ZVE32F-NEXT: lbu a2, 0(a2)
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a3
+; RV64ZVE32F-NEXT: vmv.x.s a3, v11
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a4
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a3
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vslide1down.vx v9, v10, a2
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
+; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: beqz a2, .LBB132_10
+; RV64ZVE32F-NEXT: .LBB132_14: # %cond.load13
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 1
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 1
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vmv.x.s a3, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 2
+; RV64ZVE32F-NEXT: vmv.x.s a4, v11
+; RV64ZVE32F-NEXT: vmv.v.x v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 3
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a4
+; RV64ZVE32F-NEXT: vmv.x.s a4, v10
+; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 6
+; RV64ZVE32F-NEXT: add a3, a0, a3
+; RV64ZVE32F-NEXT: lbu a3, 0(a3)
+; RV64ZVE32F-NEXT: vslide1down.vx v11, v11, a2
+; RV64ZVE32F-NEXT: vmv.x.s a2, v10
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v11, a4
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v10, v10, a3
+; RV64ZVE32F-NEXT: vmv.x.s a2, v9
+; RV64ZVE32F-NEXT: vslide1down.vx v9, v10, a2
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 1
+; RV64ZVE32F-NEXT: andi a1, a1, 64
+; RV64ZVE32F-NEXT: bnez a1, .LBB132_11
+; RV64ZVE32F-NEXT: j .LBB132_12
+ %ptrs = getelementptr inbounds i8, ptr %base, <7 x i8> %idxs
+ %v = call <7 x i8> @llvm.masked.gather.v7i8.v7p0(<7 x ptr> %ptrs, i32 1, <7 x i1> %m, <7 x i8> %passthru)
+ ret <7 x i8> %v
+}
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32V-ZVFH: {{.*}}
; RV32V-ZVFHMIN: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr165232.ll b/llvm/test/CodeGen/RISCV/rvv/pr165232.ll
new file mode 100644
index 0000000..bef53c6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/pr165232.ll
@@ -0,0 +1,244 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "riscv64-unknown-linux-gnu"
+
+define i1 @main(ptr %var_117, ptr %arrayinit.element3045, ptr %arrayinit.element3047, ptr %arrayinit.element3049, ptr %arrayinit.element3051, ptr %arrayinit.element3053, ptr %arrayinit.element3055, ptr %arrayinit.element3057, ptr %arrayinit.element3059, ptr %arrayinit.element3061, ptr %arrayinit.element3063, ptr %arrayinit.element3065, ptr %arrayinit.element3067, i64 %var_94_i.07698, target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %0, target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %1) {
+; CHECK-LABEL: main:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr t0, vlenb
+; CHECK-NEXT: slli t0, t0, 3
+; CHECK-NEXT: mv t1, t0
+; CHECK-NEXT: slli t0, t0, 1
+; CHECK-NEXT: add t0, t0, t1
+; CHECK-NEXT: sub sp, sp, t0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: sd a1, 8(sp) # 8-byte Folded Spill
+; CHECK-NEXT: sd a2, 0(sp) # 8-byte Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs4r.v v12, (a1) # vscale x 32-byte Folded Spill
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 2
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: vs4r.v v16, (a1) # vscale x 32-byte Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 2
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs4r.v v8, (a1) # vscale x 32-byte Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: ld t0, 56(a1)
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: ld t1, 48(a1)
+; CHECK-NEXT: vsetvli t2, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: ld t2, 40(a1)
+; CHECK-NEXT: # kill: def $v10 killed $v9 killed $vtype
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: ld t3, 32(a1)
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: ld t4, 16(a1)
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: ld t5, 24(a1)
+; CHECK-NEXT: vmv.v.i v13, 0
+; CHECK-NEXT: vsetvli t6, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v22, 0
+; CHECK-NEXT: vmv1r.v v14, v9
+; CHECK-NEXT: sd zero, 0(a0)
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: vmv1r.v v15, v9
+; CHECK-NEXT: vmv1r.v v18, v9
+; CHECK-NEXT: li t6, 1023
+; CHECK-NEXT: vmv.v.i v26, 0
+; CHECK-NEXT: vmv1r.v v19, v9
+; CHECK-NEXT: slli t6, t6, 52
+; CHECK-NEXT: vmv.v.i v28, 0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs2r.v v22, (a1) # vscale x 16-byte Folded Spill
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: vs4r.v v24, (a1) # vscale x 32-byte Folded Spill
+; CHECK-NEXT: slli a2, a2, 1
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: ld a2, 0(sp) # 8-byte Folded Reload
+; CHECK-NEXT: vs2r.v v28, (a1) # vscale x 16-byte Folded Spill
+; CHECK-NEXT: ld a1, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT: vmv1r.v v20, v9
+; CHECK-NEXT: sd t6, 0(t5)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v14, v12
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v9
+; CHECK-NEXT: vmv1r.v v21, v9
+; CHECK-NEXT: csrr t5, vlenb
+; CHECK-NEXT: slli t5, t5, 3
+; CHECK-NEXT: add t5, sp, t5
+; CHECK-NEXT: addi t5, t5, 16
+; CHECK-NEXT: vs2r.v v18, (t5) # vscale x 16-byte Folded Spill
+; CHECK-NEXT: csrr t6, vlenb
+; CHECK-NEXT: slli t6, t6, 1
+; CHECK-NEXT: add t5, t5, t6
+; CHECK-NEXT: vs2r.v v20, (t5) # vscale x 16-byte Folded Spill
+; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v19, 0
+; CHECK-NEXT: vmclr.m v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v6, 0
+; CHECK-NEXT: .LBB0_1: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmv1r.v v20, v19
+; CHECK-NEXT: vmv1r.v v3, v19
+; CHECK-NEXT: vmv1r.v v5, v19
+; CHECK-NEXT: vmv1r.v v2, v19
+; CHECK-NEXT: vmv1r.v v31, v19
+; CHECK-NEXT: vmv1r.v v30, v19
+; CHECK-NEXT: vmv1r.v v4, v19
+; CHECK-NEXT: vmv2r.v v22, v10
+; CHECK-NEXT: vmv4r.v v24, v12
+; CHECK-NEXT: vmv2r.v v28, v16
+; CHECK-NEXT: vmv2r.v v8, v6
+; CHECK-NEXT: vmv1r.v v18, v19
+; CHECK-NEXT: vmv1r.v v21, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, ma
+; CHECK-NEXT: vle32.v v20, (t4)
+; CHECK-NEXT: vle32.v v3, (t1)
+; CHECK-NEXT: vle32.v v30, (a7)
+; CHECK-NEXT: vle64.v v8, (a4)
+; CHECK-NEXT: vle32.v v5, (t2)
+; CHECK-NEXT: vle32.v v2, (t3)
+; CHECK-NEXT: vle32.v v31, (a6)
+; CHECK-NEXT: vmv1r.v v24, v30
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT: vmflt.vv v21, v8, v6, v0.t
+; CHECK-NEXT: vmv1r.v v8, v19
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
+; CHECK-NEXT: vle32.v v18, (a2)
+; CHECK-NEXT: vle32.v v8, (a3)
+; CHECK-NEXT: vle32.v v4, (a5)
+; CHECK-NEXT: vmv1r.v v22, v20
+; CHECK-NEXT: csrr t5, vlenb
+; CHECK-NEXT: slli t5, t5, 3
+; CHECK-NEXT: add t5, sp, t5
+; CHECK-NEXT: addi t5, t5, 16
+; CHECK-NEXT: vl1r.v v1, (t5) # vscale x 8-byte Folded Reload
+; CHECK-NEXT: csrr t6, vlenb
+; CHECK-NEXT: add t5, t5, t6
+; CHECK-NEXT: vl2r.v v2, (t5) # vscale x 16-byte Folded Reload
+; CHECK-NEXT: slli t6, t6, 1
+; CHECK-NEXT: add t5, t5, t6
+; CHECK-NEXT: vl1r.v v4, (t5) # vscale x 8-byte Folded Reload
+; CHECK-NEXT: vsseg4e32.v v1, (zero)
+; CHECK-NEXT: vsseg8e32.v v22, (a1)
+; CHECK-NEXT: vmv1r.v v0, v21
+; CHECK-NEXT: vssub.vv v8, v19, v18, v0.t
+; CHECK-NEXT: csrr t5, vlenb
+; CHECK-NEXT: slli t5, t5, 2
+; CHECK-NEXT: mv t6, t5
+; CHECK-NEXT: slli t5, t5, 1
+; CHECK-NEXT: add t5, t5, t6
+; CHECK-NEXT: add t5, sp, t5
+; CHECK-NEXT: addi t5, t5, 16
+; CHECK-NEXT: vl4r.v v20, (t5) # vscale x 32-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, t0, e64, m2, ta, ma
+; CHECK-NEXT: vsseg2e64.v v20, (zero)
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: addi t5, sp, 16
+; CHECK-NEXT: vl4r.v v20, (t5) # vscale x 32-byte Folded Reload
+; CHECK-NEXT: csrr t6, vlenb
+; CHECK-NEXT: slli t6, t6, 2
+; CHECK-NEXT: add t5, t5, t6
+; CHECK-NEXT: vl4r.v v24, (t5) # vscale x 32-byte Folded Reload
+; CHECK-NEXT: vsetivli zero, 0, e64, m2, ta, ma
+; CHECK-NEXT: vsseg4e64.v v20, (zero), v0.t
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vsseg8e32.v v8, (a0)
+; CHECK-NEXT: csrr t5, vlenb
+; CHECK-NEXT: slli t5, t5, 4
+; CHECK-NEXT: add t5, sp, t5
+; CHECK-NEXT: addi t5, t5, 16
+; CHECK-NEXT: vl4r.v v20, (t5) # vscale x 32-byte Folded Reload
+; CHECK-NEXT: csrr t6, vlenb
+; CHECK-NEXT: slli t6, t6, 2
+; CHECK-NEXT: add t5, t5, t6
+; CHECK-NEXT: vl4r.v v24, (t5) # vscale x 32-byte Folded Reload
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT: vsseg4e64.v v20, (zero)
+; CHECK-NEXT: j .LBB0_1
+entry:
+ store double 0.000000e+00, ptr %var_117, align 8
+ store double 1.000000e+00, ptr %arrayinit.element3061, align 8
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %2 = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.p0.i64(<vscale x 2 x float> zeroinitializer, ptr %arrayinit.element3059, i64 0)
+ %3 = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.p0.i64(<vscale x 2 x float> zeroinitializer, ptr %arrayinit.element3067, i64 0)
+ %4 = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.p0.i64(<vscale x 2 x float> zeroinitializer, ptr %arrayinit.element3065, i64 0)
+ %5 = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.p0.i64(<vscale x 2 x float> zeroinitializer, ptr %arrayinit.element3063, i64 0)
+ %6 = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.p0.i64(<vscale x 2 x float> zeroinitializer, ptr %arrayinit.element3055, i64 0)
+ %7 = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.p0.i64(<vscale x 2 x float> zeroinitializer, ptr %arrayinit.element3057, i64 0)
+ %8 = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32.p0.i64(<vscale x 2 x float> zeroinitializer, ptr %arrayinit.element3053, i64 0)
+ %9 = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64.p0.i64(<vscale x 2 x double> zeroinitializer, ptr %arrayinit.element3051, i64 0)
+ %10 = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.p0.i64(<vscale x 2 x i32> zeroinitializer, ptr %arrayinit.element3047, i64 0)
+ %11 = tail call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32.p0.i64(<vscale x 2 x i32> zeroinitializer, ptr %arrayinit.element3049, i64 0)
+ call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv8i8_4t.p0.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 4) zeroinitializer, ptr null, i64 0, i64 5)
+ %12 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_8t.nxv2f32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) zeroinitializer, <vscale x 2 x float> %8, i32 0)
+ %13 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_8t.nxv2f32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %12, <vscale x 2 x float> %7, i32 2)
+ %14 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_8t.nxv2f32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %13, <vscale x 2 x float> %6, i32 0)
+ %15 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_8t.nxv2f32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %14, <vscale x 2 x float> %5, i32 0)
+ %16 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_8t.nxv2f32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %15, <vscale x 2 x float> %4, i32 0)
+ %17 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_8t.nxv2f32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %16, <vscale x 2 x float> %3, i32 0)
+ %18 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_8t.nxv2f32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %17, <vscale x 2 x float> %2, i32 0)
+ call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t.p0.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %18, ptr %arrayinit.element3045, i64 0, i64 5)
+ %19 = tail call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x i1> zeroinitializer, <vscale x 2 x double> zeroinitializer, <vscale x 2 x double> %9, <vscale x 2 x i1> zeroinitializer, i64 0)
+ %20 = tail call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> %11, <vscale x 2 x i32> zeroinitializer, <vscale x 2 x i32> %10, <vscale x 2 x i1> %19, i64 0, i64 0)
+ call void @llvm.riscv.vsseg2.triscv.vector.tuple_nxv16i8_2t.p0.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %0, ptr null, i64 %var_94_i.07698, i64 6)
+ call void @llvm.riscv.vsseg4.mask.triscv.vector.tuple_nxv16i8_4t.p0.nxv2i1.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) zeroinitializer, ptr null, <vscale x 2 x i1> zeroinitializer, i64 0, i64 6)
+ %21 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 8) @llvm.riscv.tuple.insert.triscv.vector.tuple_nxv8i8_8t.nxv2i32(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) poison, <vscale x 2 x i32> %20, i32 0)
+ call void @llvm.riscv.vsseg8.triscv.vector.tuple_nxv8i8_8t.p0.i64(target("riscv.vector.tuple", <vscale x 8 x i8>, 8) %21, ptr %var_117, i64 0, i64 5)
+ call void @llvm.riscv.vsseg4.triscv.vector.tuple_nxv16i8_4t.p0.i64(target("riscv.vector.tuple", <vscale x 16 x i8>, 4) %1, ptr null, i64 0, i64 6)
+ br label %for.body
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
index dd9960d..9c2fa9d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-spill.mir
@@ -32,10 +32,10 @@ body: |
; CHECK-NEXT: $x11 = ADDI $x2, 16
; CHECK-NEXT: VS4R_V $v0m4, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s256>) into %stack.0, align 8)
; CHECK-NEXT: $x12 = PseudoReadVLENB
- ; CHECK-NEXT: $x13 = SLLI $x12, 2
- ; CHECK-NEXT: $x11 = ADD killed $x11, killed $x13
+ ; CHECK-NEXT: $x12 = SLLI killed $x12, 2
+ ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: VS2R_V $v4m2, $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s128>) into %stack.0, align 8)
- ; CHECK-NEXT: $x12 = SLLI killed $x12, 1
+ ; CHECK-NEXT: $x12 = SRLI killed $x12, 1
; CHECK-NEXT: $x11 = ADD killed $x11, killed $x12
; CHECK-NEXT: VS1R_V $v6, killed $x11, implicit $v0_v1_v2_v3_v4_v5_v6 :: (store (<vscale x 1 x s64>) into %stack.0)
; CHECK-NEXT: $x11 = ADDI $x2, 16
@@ -93,10 +93,10 @@ body: |
; CHECK-NEXT: $x11 = ADDI $x2, 16
; CHECK-NEXT: $v10m2 = VL2RE8_V $x11 :: (load (<vscale x 1 x s128>) from %stack.0, align 8)
; CHECK-NEXT: $x12 = PseudoReadVLENB
- ; CHECK-NEXT: $x13 = SLLI $x12, 1
- ; CHECK-NEXT: $x11 = ADD killed $x11, killed $x13
+ ; CHECK-NEXT: $x12 = SLLI killed $x12, 1
+ ; CHECK-NEXT: $x11 = ADD killed $x11, $x12
; CHECK-NEXT: $v12m4 = VL4RE8_V $x11 :: (load (<vscale x 1 x s256>) from %stack.0, align 8)
- ; CHECK-NEXT: $x12 = SLLI killed $x12, 2
+ ; CHECK-NEXT: $x12 = SLLI killed $x12, 1
; CHECK-NEXT: $x11 = ADD killed $x11, killed $x12
; CHECK-NEXT: $v16 = VL1RE8_V killed $x11 :: (load (<vscale x 1 x s64>) from %stack.0)
; CHECK-NEXT: VS1R_V killed $v10, killed renamable $x10
diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt-min-max.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt-min-max.ll
new file mode 100644
index 0000000..05e06cea
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt-min-max.ll
@@ -0,0 +1,703 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb | FileCheck %s --check-prefixes=RV32I-ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=RV64I-ZBB
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb,+short-forward-branch-opt | \
+; RUN: FileCheck %s --check-prefixes=RV32I-SFB-ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb,+short-forward-branch-opt | \
+; RUN: FileCheck %s --check-prefixes=RV64I-SFB-ZBB
+; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb,+short-forward-branch-i-minmax | \
+; RUN: FileCheck %s --check-prefixes=RV32I-SFBIMinMax-ZBB
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb,+short-forward-branch-i-minmax | \
+; RUN: FileCheck %s --check-prefixes=RV64I-SFBIMinMax-ZBB
+
+define i32 @select_example_smax(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-ZBB-LABEL: select_example_smax:
+; RV32I-ZBB: # %bb.0: # %entry
+; RV32I-ZBB-NEXT: beqz a2, .LBB0_2
+; RV32I-ZBB-NEXT: # %bb.1:
+; RV32I-ZBB-NEXT: max a1, a0, a3
+; RV32I-ZBB-NEXT: .LBB0_2: # %entry
+; RV32I-ZBB-NEXT: mv a0, a1
+; RV32I-ZBB-NEXT: ret
+;
+; RV64I-ZBB-LABEL: select_example_smax:
+; RV64I-ZBB: # %bb.0: # %entry
+; RV64I-ZBB-NEXT: beqz a2, .LBB0_2
+; RV64I-ZBB-NEXT: # %bb.1:
+; RV64I-ZBB-NEXT: sext.w a3, a3
+; RV64I-ZBB-NEXT: sext.w a0, a0
+; RV64I-ZBB-NEXT: max a1, a0, a3
+; RV64I-ZBB-NEXT: .LBB0_2: # %entry
+; RV64I-ZBB-NEXT: mv a0, a1
+; RV64I-ZBB-NEXT: ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_smax:
+; RV32I-SFB-ZBB: # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT: max a0, a0, a3
+; RV32I-SFB-ZBB-NEXT: bnez a2, .LBB0_2
+; RV32I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a1
+; RV32I-SFB-ZBB-NEXT: .LBB0_2: # %entry
+; RV32I-SFB-ZBB-NEXT: ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_smax:
+; RV64I-SFB-ZBB: # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT: sext.w a3, a3
+; RV64I-SFB-ZBB-NEXT: sext.w a0, a0
+; RV64I-SFB-ZBB-NEXT: max a0, a0, a3
+; RV64I-SFB-ZBB-NEXT: bnez a2, .LBB0_2
+; RV64I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT: mv a0, a1
+; RV64I-SFB-ZBB-NEXT: .LBB0_2: # %entry
+; RV64I-SFB-ZBB-NEXT: ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_smax:
+; RV32I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB0_2
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: max a1, a0, a3
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB0_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV32I-SFBIMinMax-ZBB-NEXT: ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_smax:
+; RV64I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: sext.w a3, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: sext.w a0, a0
+; RV64I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB0_2
+; RV64I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: max a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: .LBB0_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT: ret
+entry:
+ %res = call i32 @llvm.smax.i32(i32 %a, i32 %y)
+ %sel = select i1 %x, i32 %res, i32 %b
+ ret i32 %sel
+}
+
+define i32 @select_example_smin(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-ZBB-LABEL: select_example_smin:
+; RV32I-ZBB: # %bb.0: # %entry
+; RV32I-ZBB-NEXT: beqz a2, .LBB1_2
+; RV32I-ZBB-NEXT: # %bb.1:
+; RV32I-ZBB-NEXT: min a1, a0, a3
+; RV32I-ZBB-NEXT: .LBB1_2: # %entry
+; RV32I-ZBB-NEXT: mv a0, a1
+; RV32I-ZBB-NEXT: ret
+;
+; RV64I-ZBB-LABEL: select_example_smin:
+; RV64I-ZBB: # %bb.0: # %entry
+; RV64I-ZBB-NEXT: beqz a2, .LBB1_2
+; RV64I-ZBB-NEXT: # %bb.1:
+; RV64I-ZBB-NEXT: sext.w a3, a3
+; RV64I-ZBB-NEXT: sext.w a0, a0
+; RV64I-ZBB-NEXT: min a1, a0, a3
+; RV64I-ZBB-NEXT: .LBB1_2: # %entry
+; RV64I-ZBB-NEXT: mv a0, a1
+; RV64I-ZBB-NEXT: ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_smin:
+; RV32I-SFB-ZBB: # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT: min a0, a0, a3
+; RV32I-SFB-ZBB-NEXT: bnez a2, .LBB1_2
+; RV32I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a1
+; RV32I-SFB-ZBB-NEXT: .LBB1_2: # %entry
+; RV32I-SFB-ZBB-NEXT: ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_smin:
+; RV64I-SFB-ZBB: # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT: sext.w a3, a3
+; RV64I-SFB-ZBB-NEXT: sext.w a0, a0
+; RV64I-SFB-ZBB-NEXT: min a0, a0, a3
+; RV64I-SFB-ZBB-NEXT: bnez a2, .LBB1_2
+; RV64I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT: mv a0, a1
+; RV64I-SFB-ZBB-NEXT: .LBB1_2: # %entry
+; RV64I-SFB-ZBB-NEXT: ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_smin:
+; RV32I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB1_2
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: min a1, a0, a3
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB1_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV32I-SFBIMinMax-ZBB-NEXT: ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_smin:
+; RV64I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: sext.w a3, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: sext.w a0, a0
+; RV64I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB1_2
+; RV64I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: min a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: .LBB1_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT: ret
+entry:
+ %res = call i32 @llvm.smin.i32(i32 %a, i32 %y)
+ %sel = select i1 %x, i32 %res, i32 %b
+ ret i32 %sel
+}
+
+define i32 @select_example_umax(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-ZBB-LABEL: select_example_umax:
+; RV32I-ZBB: # %bb.0: # %entry
+; RV32I-ZBB-NEXT: beqz a2, .LBB2_2
+; RV32I-ZBB-NEXT: # %bb.1:
+; RV32I-ZBB-NEXT: maxu a1, a0, a3
+; RV32I-ZBB-NEXT: .LBB2_2: # %entry
+; RV32I-ZBB-NEXT: mv a0, a1
+; RV32I-ZBB-NEXT: ret
+;
+; RV64I-ZBB-LABEL: select_example_umax:
+; RV64I-ZBB: # %bb.0: # %entry
+; RV64I-ZBB-NEXT: beqz a2, .LBB2_2
+; RV64I-ZBB-NEXT: # %bb.1:
+; RV64I-ZBB-NEXT: sext.w a3, a3
+; RV64I-ZBB-NEXT: sext.w a0, a0
+; RV64I-ZBB-NEXT: maxu a1, a0, a3
+; RV64I-ZBB-NEXT: .LBB2_2: # %entry
+; RV64I-ZBB-NEXT: mv a0, a1
+; RV64I-ZBB-NEXT: ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_umax:
+; RV32I-SFB-ZBB: # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT: maxu a0, a0, a3
+; RV32I-SFB-ZBB-NEXT: bnez a2, .LBB2_2
+; RV32I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a1
+; RV32I-SFB-ZBB-NEXT: .LBB2_2: # %entry
+; RV32I-SFB-ZBB-NEXT: ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_umax:
+; RV64I-SFB-ZBB: # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT: sext.w a3, a3
+; RV64I-SFB-ZBB-NEXT: sext.w a0, a0
+; RV64I-SFB-ZBB-NEXT: maxu a0, a0, a3
+; RV64I-SFB-ZBB-NEXT: bnez a2, .LBB2_2
+; RV64I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT: mv a0, a1
+; RV64I-SFB-ZBB-NEXT: .LBB2_2: # %entry
+; RV64I-SFB-ZBB-NEXT: ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_umax:
+; RV32I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB2_2
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: maxu a1, a0, a3
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB2_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV32I-SFBIMinMax-ZBB-NEXT: ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_umax:
+; RV64I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: sext.w a3, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: sext.w a0, a0
+; RV64I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB2_2
+; RV64I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: maxu a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: .LBB2_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT: ret
+entry:
+ %res = call i32 @llvm.umax.i32(i32 %a, i32 %y)
+ %sel = select i1 %x, i32 %res, i32 %b
+ ret i32 %sel
+}
+
+define i32 @select_example_umin(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-ZBB-LABEL: select_example_umin:
+; RV32I-ZBB: # %bb.0: # %entry
+; RV32I-ZBB-NEXT: beqz a2, .LBB3_2
+; RV32I-ZBB-NEXT: # %bb.1:
+; RV32I-ZBB-NEXT: minu a1, a0, a3
+; RV32I-ZBB-NEXT: .LBB3_2: # %entry
+; RV32I-ZBB-NEXT: mv a0, a1
+; RV32I-ZBB-NEXT: ret
+;
+; RV64I-ZBB-LABEL: select_example_umin:
+; RV64I-ZBB: # %bb.0: # %entry
+; RV64I-ZBB-NEXT: beqz a2, .LBB3_2
+; RV64I-ZBB-NEXT: # %bb.1:
+; RV64I-ZBB-NEXT: sext.w a3, a3
+; RV64I-ZBB-NEXT: sext.w a0, a0
+; RV64I-ZBB-NEXT: minu a1, a0, a3
+; RV64I-ZBB-NEXT: .LBB3_2: # %entry
+; RV64I-ZBB-NEXT: mv a0, a1
+; RV64I-ZBB-NEXT: ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_umin:
+; RV32I-SFB-ZBB: # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT: minu a0, a0, a3
+; RV32I-SFB-ZBB-NEXT: bnez a2, .LBB3_2
+; RV32I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a1
+; RV32I-SFB-ZBB-NEXT: .LBB3_2: # %entry
+; RV32I-SFB-ZBB-NEXT: ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_umin:
+; RV64I-SFB-ZBB: # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT: sext.w a3, a3
+; RV64I-SFB-ZBB-NEXT: sext.w a0, a0
+; RV64I-SFB-ZBB-NEXT: minu a0, a0, a3
+; RV64I-SFB-ZBB-NEXT: bnez a2, .LBB3_2
+; RV64I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT: mv a0, a1
+; RV64I-SFB-ZBB-NEXT: .LBB3_2: # %entry
+; RV64I-SFB-ZBB-NEXT: ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_umin:
+; RV32I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB3_2
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: minu a1, a0, a3
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB3_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV32I-SFBIMinMax-ZBB-NEXT: ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_umin:
+; RV64I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: sext.w a3, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: sext.w a0, a0
+; RV64I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB3_2
+; RV64I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: minu a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: .LBB3_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT: ret
+entry:
+ %res = call i32 @llvm.umin.i32(i32 %a, i32 %y)
+ %sel = select i1 %x, i32 %res, i32 %b
+ ret i32 %sel
+}
+
+define i64 @select_example_smax_1(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-ZBB-LABEL: select_example_smax_1:
+; RV32I-ZBB: # %bb.0: # %entry
+; RV32I-ZBB-NEXT: beq a1, a6, .LBB4_2
+; RV32I-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-ZBB-NEXT: slt a7, a6, a1
+; RV32I-ZBB-NEXT: beqz a7, .LBB4_3
+; RV32I-ZBB-NEXT: j .LBB4_4
+; RV32I-ZBB-NEXT: .LBB4_2:
+; RV32I-ZBB-NEXT: sltu a7, a5, a0
+; RV32I-ZBB-NEXT: bnez a7, .LBB4_4
+; RV32I-ZBB-NEXT: .LBB4_3: # %entry
+; RV32I-ZBB-NEXT: mv a1, a6
+; RV32I-ZBB-NEXT: mv a0, a5
+; RV32I-ZBB-NEXT: .LBB4_4: # %entry
+; RV32I-ZBB-NEXT: beqz a4, .LBB4_6
+; RV32I-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-ZBB-NEXT: ret
+; RV32I-ZBB-NEXT: .LBB4_6: # %entry
+; RV32I-ZBB-NEXT: mv a0, a2
+; RV32I-ZBB-NEXT: mv a1, a3
+; RV32I-ZBB-NEXT: ret
+;
+; RV64I-ZBB-LABEL: select_example_smax_1:
+; RV64I-ZBB: # %bb.0: # %entry
+; RV64I-ZBB-NEXT: beqz a2, .LBB4_2
+; RV64I-ZBB-NEXT: # %bb.1:
+; RV64I-ZBB-NEXT: max a1, a0, a3
+; RV64I-ZBB-NEXT: .LBB4_2: # %entry
+; RV64I-ZBB-NEXT: mv a0, a1
+; RV64I-ZBB-NEXT: ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_smax_1:
+; RV32I-SFB-ZBB: # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT: sltu a7, a5, a0
+; RV32I-SFB-ZBB-NEXT: slt t0, a6, a1
+; RV32I-SFB-ZBB-NEXT: bne a1, a6, .LBB4_2
+; RV32I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT: mv t0, a7
+; RV32I-SFB-ZBB-NEXT: .LBB4_2: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez t0, .LBB4_4
+; RV32I-SFB-ZBB-NEXT: # %bb.3: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a1, a6
+; RV32I-SFB-ZBB-NEXT: .LBB4_4: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez t0, .LBB4_6
+; RV32I-SFB-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a5
+; RV32I-SFB-ZBB-NEXT: .LBB4_6: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez a4, .LBB4_8
+; RV32I-SFB-ZBB-NEXT: # %bb.7: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a2
+; RV32I-SFB-ZBB-NEXT: .LBB4_8: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez a4, .LBB4_10
+; RV32I-SFB-ZBB-NEXT: # %bb.9: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a1, a3
+; RV32I-SFB-ZBB-NEXT: .LBB4_10: # %entry
+; RV32I-SFB-ZBB-NEXT: ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_smax_1:
+; RV64I-SFB-ZBB: # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT: max a0, a0, a3
+; RV64I-SFB-ZBB-NEXT: bnez a2, .LBB4_2
+; RV64I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT: mv a0, a1
+; RV64I-SFB-ZBB-NEXT: .LBB4_2: # %entry
+; RV64I-SFB-ZBB-NEXT: ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_smax_1:
+; RV32I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: sltu a7, a5, a0
+; RV32I-SFBIMinMax-ZBB-NEXT: slt t0, a6, a1
+; RV32I-SFBIMinMax-ZBB-NEXT: bne a1, a6, .LBB4_2
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv t0, a7
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB4_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez t0, .LBB4_4
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.3: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB4_4: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez t0, .LBB4_6
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB4_6: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez a4, .LBB4_8
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.7: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a2
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB4_8: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez a4, .LBB4_10
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.9: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a1, a3
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB4_10: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_smax_1:
+; RV64I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB4_2
+; RV64I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: max a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: .LBB4_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT: ret
+entry:
+ %res = call i64 @llvm.smax.i64(i64 %a, i64 %y)
+ %sel = select i1 %x, i64 %res, i64 %b
+ ret i64 %sel
+}
+
+define i64 @select_example_smin_1(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-ZBB-LABEL: select_example_smin_1:
+; RV32I-ZBB: # %bb.0: # %entry
+; RV32I-ZBB-NEXT: beq a1, a6, .LBB5_2
+; RV32I-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-ZBB-NEXT: slt a7, a1, a6
+; RV32I-ZBB-NEXT: beqz a7, .LBB5_3
+; RV32I-ZBB-NEXT: j .LBB5_4
+; RV32I-ZBB-NEXT: .LBB5_2:
+; RV32I-ZBB-NEXT: sltu a7, a0, a5
+; RV32I-ZBB-NEXT: bnez a7, .LBB5_4
+; RV32I-ZBB-NEXT: .LBB5_3: # %entry
+; RV32I-ZBB-NEXT: mv a1, a6
+; RV32I-ZBB-NEXT: mv a0, a5
+; RV32I-ZBB-NEXT: .LBB5_4: # %entry
+; RV32I-ZBB-NEXT: beqz a4, .LBB5_6
+; RV32I-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-ZBB-NEXT: ret
+; RV32I-ZBB-NEXT: .LBB5_6: # %entry
+; RV32I-ZBB-NEXT: mv a0, a2
+; RV32I-ZBB-NEXT: mv a1, a3
+; RV32I-ZBB-NEXT: ret
+;
+; RV64I-ZBB-LABEL: select_example_smin_1:
+; RV64I-ZBB: # %bb.0: # %entry
+; RV64I-ZBB-NEXT: beqz a2, .LBB5_2
+; RV64I-ZBB-NEXT: # %bb.1:
+; RV64I-ZBB-NEXT: min a1, a0, a3
+; RV64I-ZBB-NEXT: .LBB5_2: # %entry
+; RV64I-ZBB-NEXT: mv a0, a1
+; RV64I-ZBB-NEXT: ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_smin_1:
+; RV32I-SFB-ZBB: # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT: sltu a7, a0, a5
+; RV32I-SFB-ZBB-NEXT: slt t0, a1, a6
+; RV32I-SFB-ZBB-NEXT: bne a1, a6, .LBB5_2
+; RV32I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT: mv t0, a7
+; RV32I-SFB-ZBB-NEXT: .LBB5_2: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez t0, .LBB5_4
+; RV32I-SFB-ZBB-NEXT: # %bb.3: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a1, a6
+; RV32I-SFB-ZBB-NEXT: .LBB5_4: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez t0, .LBB5_6
+; RV32I-SFB-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a5
+; RV32I-SFB-ZBB-NEXT: .LBB5_6: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez a4, .LBB5_8
+; RV32I-SFB-ZBB-NEXT: # %bb.7: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a2
+; RV32I-SFB-ZBB-NEXT: .LBB5_8: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez a4, .LBB5_10
+; RV32I-SFB-ZBB-NEXT: # %bb.9: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a1, a3
+; RV32I-SFB-ZBB-NEXT: .LBB5_10: # %entry
+; RV32I-SFB-ZBB-NEXT: ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_smin_1:
+; RV64I-SFB-ZBB: # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT: min a0, a0, a3
+; RV64I-SFB-ZBB-NEXT: bnez a2, .LBB5_2
+; RV64I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT: mv a0, a1
+; RV64I-SFB-ZBB-NEXT: .LBB5_2: # %entry
+; RV64I-SFB-ZBB-NEXT: ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_smin_1:
+; RV32I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: sltu a7, a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT: slt t0, a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT: bne a1, a6, .LBB5_2
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv t0, a7
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB5_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez t0, .LBB5_4
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.3: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB5_4: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez t0, .LBB5_6
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB5_6: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez a4, .LBB5_8
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.7: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a2
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB5_8: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez a4, .LBB5_10
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.9: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a1, a3
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB5_10: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_smin_1:
+; RV64I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB5_2
+; RV64I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: min a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: .LBB5_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT: ret
+entry:
+ %res = call i64 @llvm.smin.i64(i64 %a, i64 %y)
+ %sel = select i1 %x, i64 %res, i64 %b
+ ret i64 %sel
+}
+
+define i64 @select_example_umax_1(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-ZBB-LABEL: select_example_umax_1:
+; RV32I-ZBB: # %bb.0: # %entry
+; RV32I-ZBB-NEXT: beq a1, a6, .LBB6_2
+; RV32I-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-ZBB-NEXT: sltu a7, a6, a1
+; RV32I-ZBB-NEXT: beqz a7, .LBB6_3
+; RV32I-ZBB-NEXT: j .LBB6_4
+; RV32I-ZBB-NEXT: .LBB6_2:
+; RV32I-ZBB-NEXT: sltu a7, a5, a0
+; RV32I-ZBB-NEXT: bnez a7, .LBB6_4
+; RV32I-ZBB-NEXT: .LBB6_3: # %entry
+; RV32I-ZBB-NEXT: mv a1, a6
+; RV32I-ZBB-NEXT: mv a0, a5
+; RV32I-ZBB-NEXT: .LBB6_4: # %entry
+; RV32I-ZBB-NEXT: beqz a4, .LBB6_6
+; RV32I-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-ZBB-NEXT: ret
+; RV32I-ZBB-NEXT: .LBB6_6: # %entry
+; RV32I-ZBB-NEXT: mv a0, a2
+; RV32I-ZBB-NEXT: mv a1, a3
+; RV32I-ZBB-NEXT: ret
+;
+; RV64I-ZBB-LABEL: select_example_umax_1:
+; RV64I-ZBB: # %bb.0: # %entry
+; RV64I-ZBB-NEXT: beqz a2, .LBB6_2
+; RV64I-ZBB-NEXT: # %bb.1:
+; RV64I-ZBB-NEXT: maxu a1, a0, a3
+; RV64I-ZBB-NEXT: .LBB6_2: # %entry
+; RV64I-ZBB-NEXT: mv a0, a1
+; RV64I-ZBB-NEXT: ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_umax_1:
+; RV32I-SFB-ZBB: # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT: sltu a7, a5, a0
+; RV32I-SFB-ZBB-NEXT: sltu t0, a6, a1
+; RV32I-SFB-ZBB-NEXT: bne a1, a6, .LBB6_2
+; RV32I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT: mv t0, a7
+; RV32I-SFB-ZBB-NEXT: .LBB6_2: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez t0, .LBB6_4
+; RV32I-SFB-ZBB-NEXT: # %bb.3: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a1, a6
+; RV32I-SFB-ZBB-NEXT: .LBB6_4: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez t0, .LBB6_6
+; RV32I-SFB-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a5
+; RV32I-SFB-ZBB-NEXT: .LBB6_6: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez a4, .LBB6_8
+; RV32I-SFB-ZBB-NEXT: # %bb.7: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a2
+; RV32I-SFB-ZBB-NEXT: .LBB6_8: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez a4, .LBB6_10
+; RV32I-SFB-ZBB-NEXT: # %bb.9: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a1, a3
+; RV32I-SFB-ZBB-NEXT: .LBB6_10: # %entry
+; RV32I-SFB-ZBB-NEXT: ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_umax_1:
+; RV64I-SFB-ZBB: # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT: maxu a0, a0, a3
+; RV64I-SFB-ZBB-NEXT: bnez a2, .LBB6_2
+; RV64I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT: mv a0, a1
+; RV64I-SFB-ZBB-NEXT: .LBB6_2: # %entry
+; RV64I-SFB-ZBB-NEXT: ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_umax_1:
+; RV32I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: sltu a7, a5, a0
+; RV32I-SFBIMinMax-ZBB-NEXT: sltu t0, a6, a1
+; RV32I-SFBIMinMax-ZBB-NEXT: bne a1, a6, .LBB6_2
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv t0, a7
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB6_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez t0, .LBB6_4
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.3: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB6_4: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez t0, .LBB6_6
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB6_6: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez a4, .LBB6_8
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.7: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a2
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB6_8: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez a4, .LBB6_10
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.9: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a1, a3
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB6_10: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_umax_1:
+; RV64I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB6_2
+; RV64I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: maxu a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: .LBB6_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT: ret
+entry:
+ %res = call i64 @llvm.umax.i64(i64 %a, i64 %y)
+ %sel = select i1 %x, i64 %res, i64 %b
+ ret i64 %sel
+}
+
+define i64 @select_example_umin_1(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-ZBB-LABEL: select_example_umin_1:
+; RV32I-ZBB: # %bb.0: # %entry
+; RV32I-ZBB-NEXT: beq a1, a6, .LBB7_2
+; RV32I-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-ZBB-NEXT: sltu a7, a1, a6
+; RV32I-ZBB-NEXT: beqz a7, .LBB7_3
+; RV32I-ZBB-NEXT: j .LBB7_4
+; RV32I-ZBB-NEXT: .LBB7_2:
+; RV32I-ZBB-NEXT: sltu a7, a0, a5
+; RV32I-ZBB-NEXT: bnez a7, .LBB7_4
+; RV32I-ZBB-NEXT: .LBB7_3: # %entry
+; RV32I-ZBB-NEXT: mv a1, a6
+; RV32I-ZBB-NEXT: mv a0, a5
+; RV32I-ZBB-NEXT: .LBB7_4: # %entry
+; RV32I-ZBB-NEXT: beqz a4, .LBB7_6
+; RV32I-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-ZBB-NEXT: ret
+; RV32I-ZBB-NEXT: .LBB7_6: # %entry
+; RV32I-ZBB-NEXT: mv a0, a2
+; RV32I-ZBB-NEXT: mv a1, a3
+; RV32I-ZBB-NEXT: ret
+;
+; RV64I-ZBB-LABEL: select_example_umin_1:
+; RV64I-ZBB: # %bb.0: # %entry
+; RV64I-ZBB-NEXT: beqz a2, .LBB7_2
+; RV64I-ZBB-NEXT: # %bb.1:
+; RV64I-ZBB-NEXT: minu a1, a0, a3
+; RV64I-ZBB-NEXT: .LBB7_2: # %entry
+; RV64I-ZBB-NEXT: mv a0, a1
+; RV64I-ZBB-NEXT: ret
+;
+; RV32I-SFB-ZBB-LABEL: select_example_umin_1:
+; RV32I-SFB-ZBB: # %bb.0: # %entry
+; RV32I-SFB-ZBB-NEXT: sltu a7, a0, a5
+; RV32I-SFB-ZBB-NEXT: sltu t0, a1, a6
+; RV32I-SFB-ZBB-NEXT: bne a1, a6, .LBB7_2
+; RV32I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFB-ZBB-NEXT: mv t0, a7
+; RV32I-SFB-ZBB-NEXT: .LBB7_2: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez t0, .LBB7_4
+; RV32I-SFB-ZBB-NEXT: # %bb.3: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a1, a6
+; RV32I-SFB-ZBB-NEXT: .LBB7_4: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez t0, .LBB7_6
+; RV32I-SFB-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a5
+; RV32I-SFB-ZBB-NEXT: .LBB7_6: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez a4, .LBB7_8
+; RV32I-SFB-ZBB-NEXT: # %bb.7: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a0, a2
+; RV32I-SFB-ZBB-NEXT: .LBB7_8: # %entry
+; RV32I-SFB-ZBB-NEXT: bnez a4, .LBB7_10
+; RV32I-SFB-ZBB-NEXT: # %bb.9: # %entry
+; RV32I-SFB-ZBB-NEXT: mv a1, a3
+; RV32I-SFB-ZBB-NEXT: .LBB7_10: # %entry
+; RV32I-SFB-ZBB-NEXT: ret
+;
+; RV64I-SFB-ZBB-LABEL: select_example_umin_1:
+; RV64I-SFB-ZBB: # %bb.0: # %entry
+; RV64I-SFB-ZBB-NEXT: minu a0, a0, a3
+; RV64I-SFB-ZBB-NEXT: bnez a2, .LBB7_2
+; RV64I-SFB-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFB-ZBB-NEXT: mv a0, a1
+; RV64I-SFB-ZBB-NEXT: .LBB7_2: # %entry
+; RV64I-SFB-ZBB-NEXT: ret
+;
+; RV32I-SFBIMinMax-ZBB-LABEL: select_example_umin_1:
+; RV32I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: sltu a7, a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT: sltu t0, a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT: bne a1, a6, .LBB7_2
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv t0, a7
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB7_2: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez t0, .LBB7_4
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.3: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a1, a6
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB7_4: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez t0, .LBB7_6
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.5: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a5
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB7_6: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez a4, .LBB7_8
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.7: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a0, a2
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB7_8: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: bnez a4, .LBB7_10
+; RV32I-SFBIMinMax-ZBB-NEXT: # %bb.9: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: mv a1, a3
+; RV32I-SFBIMinMax-ZBB-NEXT: .LBB7_10: # %entry
+; RV32I-SFBIMinMax-ZBB-NEXT: ret
+;
+; RV64I-SFBIMinMax-ZBB-LABEL: select_example_umin_1:
+; RV64I-SFBIMinMax-ZBB: # %bb.0: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: beqz a2, .LBB7_2
+; RV64I-SFBIMinMax-ZBB-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: minu a1, a0, a3
+; RV64I-SFBIMinMax-ZBB-NEXT: .LBB7_2: # %entry
+; RV64I-SFBIMinMax-ZBB-NEXT: mv a0, a1
+; RV64I-SFBIMinMax-ZBB-NEXT: ret
+entry:
+ %res = call i64 @llvm.umin.i64(i64 %a, i64 %y)
+ %sel = select i1 %x, i64 %res, i64 %b
+ ret i64 %sel
+}