Diffstat (limited to 'llvm/test/CodeGen/RISCV')
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll              | 1341
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll                   | 5100
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll              | 1341
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll                   | 5100
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll              | 1293
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll                   | 4881
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll              | 1310
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll                   | 4881
-rw-r--r--  llvm/test/CodeGen/RISCV/features-info.ll                           |    1
-rw-r--r--  llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll      |    2
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-stackmap-fp.ll                        |   41
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64-stackmap.ll                           |   10
-rw-r--r--  llvm/test/CodeGen/RISCV/short-forward-branch-opt-mul.ll            |  156
13 files changed, 25451 insertions(+), 6 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll
new file mode 100644
index 0000000..5cb55f1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei-rv64.ll
@@ -0,0 +1,1341 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vloxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vloxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vloxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vloxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vloxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vloxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vloxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vloxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vloxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vloxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vloxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vloxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vloxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vloxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vloxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vloxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vloxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vloxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vloxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vloxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vloxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vloxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vloxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vloxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vloxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vloxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vloxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vloxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll
new file mode 100644
index 0000000..fafd45b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vloxei.ll
@@ -0,0 +1,5100 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vloxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vloxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vloxei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vloxei32.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vloxei32.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vloxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vloxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vloxei32.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vloxei32.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vloxei32.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vloxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vloxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vloxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vloxei32.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vloxei32.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vloxei32.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vloxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vloxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vloxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vloxei16.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vloxei16.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vloxei16.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> poison,
+ ptr %0,
+ <vscale x 32 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> poison,
+ ptr %0,
+ <vscale x 32 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half> poison,
+ ptr %0,
+ <vscale x 32 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vloxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vloxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vloxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> poison,
+ ptr %0,
+ <vscale x 32 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i8>,
+ iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> poison,
+ ptr %0,
+ <vscale x 64 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> poison,
+ ptr %0,
+ <vscale x 32 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half> poison,
+ ptr %0,
+ <vscale x 32 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vloxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vloxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vloxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll
new file mode 100644
index 0000000..916af25
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei-rv64.ll
@@ -0,0 +1,1341 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vluxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vluxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vluxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vluxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vluxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vluxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vluxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vluxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vluxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vluxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vluxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vluxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vluxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vluxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vluxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vluxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vluxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vluxei64.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vluxei64.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vluxei64.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vluxei64.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(ptr %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vluxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(ptr %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vluxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(ptr %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vluxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(ptr %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vluxei64.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4, i64 1)
+
+ ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll
new file mode 100644
index 0000000..8dd32a1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vluxei.ll
@@ -0,0 +1,5100 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vluxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vluxei32.v v10, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vluxei32.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vluxei32.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vluxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vluxei32.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vluxei32.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vluxei32.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vluxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vluxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vluxei32.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vluxei32.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vluxei32.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(ptr %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float> poison,
+ ptr %0,
+ <vscale x 16 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(ptr %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vluxei32.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(ptr %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ <vscale x 2 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(ptr %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ <vscale x 4 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(ptr %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vluxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ <vscale x 8 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vluxei16.v v10, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vluxei16.v v12, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vluxei16.v v16, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> poison,
+ ptr %0,
+ <vscale x 32 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> poison,
+ ptr %0,
+ <vscale x 32 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(ptr %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half> poison,
+ ptr %0,
+ <vscale x 32 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(ptr %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float> poison,
+ ptr %0,
+ <vscale x 16 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(ptr %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vluxei16.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(ptr %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ <vscale x 2 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(ptr %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ <vscale x 4 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(ptr %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vluxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ <vscale x 8 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> poison,
+ ptr %0,
+ <vscale x 32 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i8>,
+ iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(ptr %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> poison,
+ ptr %0,
+ <vscale x 64 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> poison,
+ ptr %0,
+ <vscale x 32 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(ptr %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv4r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half> poison,
+ ptr %0,
+ <vscale x 32 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(ptr %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float> poison,
+ ptr %0,
+ <vscale x 16 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(ptr %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vluxei8.v v9, (a0), v8
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double> poison,
+ ptr %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(ptr %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double> poison,
+ ptr %0,
+ <vscale x 2 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(ptr %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double> poison,
+ ptr %0,
+ <vscale x 4 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(ptr %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vluxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double> poison,
+ ptr %0,
+ <vscale x 8 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen,
+ iXLen);
+
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vluxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4, iXLen 1)
+
+ ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll
new file mode 100644
index 0000000..4963d91
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei-rv64.ll
@@ -0,0 +1,1293 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -global-isel -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
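+; The remaining tests in this file repeat the same indexed-store pattern for
+; floating-point value types (f16, f32, f64); only the element type of the
+; stored data changes, while the index vectors remain <vscale x N x i64>.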
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll
new file mode 100644
index 0000000..7ea2e17
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsoxei.ll
@@ -0,0 +1,4881 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
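+;
+; Note: iXLen is a placeholder for the target XLEN integer type. The RUN lines
+; above rewrite it to i32 (riscv32) or i64 (riscv64) with sed before llc runs,
+; so a single copy of these tests exercises both configurations.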
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ <vscale x 64 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsoxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll
new file mode 100644
index 0000000..9bd272a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei-rv64.ll
@@ -0,0 +1,1310 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvfbfmin -global-isel -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+define void @intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_allonesmask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> splat (i1 true),
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll
new file mode 100644
index 0000000..7cd1545
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rvv/vsuxei.ll
@@ -0,0 +1,4881 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin \
+; RUN: -global-isel -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i32>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei32.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i16>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei16.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ <vscale x 64 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ ptr,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ ptr %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ ptr,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ ptr %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ ptr,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ ptr %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ ptr,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ ptr %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ ptr,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v10, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ ptr %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ ptr,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v12, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ ptr %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i8>,
+ iXLen);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ iXLen %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ ptr,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ iXLen);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; CHECK-NEXT: vsuxei8.v v8, (a0), v16, v0.t
+; CHECK-NEXT: ret
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ ptr %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ iXLen %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 988d049..cf44af6 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -137,6 +137,7 @@
; CHECK-NEXT: shifted-zextw-fusion - Enable SLLI+SRLI to be fused when computing (shifted) word zero extension.
; CHECK-NEXT: shlcofideleg - 'Shlcofideleg' (Delegating LCOFI Interrupts to VS-mode).
; CHECK-NEXT: short-forward-branch-i-minmax - Enable short forward branch optimization for min,max instructions in Zbb.
+; CHECK-NEXT: short-forward-branch-i-mul - Enable short forward branch optimization for mul instruction.
; CHECK-NEXT: short-forward-branch-opt - Enable short forward branch optimization.
; CHECK-NEXT: shtvala - 'Shtvala' (htval provides all needed values).
; CHECK-NEXT: shvsatpa - 'Shvsatpa' (vsatp supports all modes supported by satp).
diff --git a/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll b/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll
index c489bc3..aa63552 100644
--- a/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll
+++ b/llvm/test/CodeGen/RISCV/replace-with-veclib-sleef-scalable.ll
@@ -488,5 +488,5 @@ declare <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double>)
declare <vscale x 4 x float> @llvm.trunc.nxv4f32(<vscale x 4 x float>)
;.
; CHECK: attributes #[[ATTR0]] = { "target-features"="+v" }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) "target-features"="+v" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nocreateundeforpoison nofree nosync nounwind speculatable willreturn memory(none) "target-features"="+v" }
;.
diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap-fp.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap-fp.ll
new file mode 100644
index 0000000..bf0a2e5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rv64-stackmap-fp.ll
@@ -0,0 +1,41 @@
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh < %s | FileCheck %s
+
+; CHECK-LABEL: .section .llvm_stackmaps
+; CHECK-NEXT: __LLVM_StackMaps:
+; Header
+; CHECK-NEXT: .byte 3
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .half 0
+; Num Functions
+; CHECK-NEXT: .word 1
+; Num LargeConstants
+; CHECK-NEXT: .word 0
+; Num Callsites
+; CHECK-NEXT: .word 1
+
+; Functions and stack size
+; CHECK-NEXT: .quad liveArgs
+; CHECK-NEXT: .quad 0
+; CHECK-NEXT: .quad 1
+
+; Spilled stack map values.
+;
+; Verify 25 stack map entries.
+;
+; CHECK-LABEL: .word .L{{.*}}-liveArgs
+; CHECK-NEXT: .half 0
+; CHECK-NEXT: .half 25
+;
+; Check that at least one is a spilled entry from SP.
+; Location: Indirect SP + ...
+; CHECK: .byte 3
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .half 8
+; CHECK-NEXT: .half 2
+; CHECK-NEXT: .half 0
+; CHECK-NEXT: .word
+define void @liveArgs(double %arg0, double %arg1, double %arg2, double %arg3, double %arg4, double %arg5, double %arg6, double %arg7, double %arg8, double %arg9, double %arg10, double %arg11, double %arg12, double %arg13, double %arg14, double %arg15, double %arg16, double %arg17, double %arg18, double %arg19, double %arg20, double %arg21, double %arg22, double %arg23, half %arg24, half %arg25, half %arg26, half %arg27, half %arg28, bfloat %arg29) {
+entry:
+ call void (i64, i32, ptr, i32, ...) @llvm.experimental.patchpoint.void(i64 11, i32 28, ptr null, i32 5, double %arg0, double %arg1, double %arg2, double %arg3, double %arg4, double %arg5, double %arg6, double %arg7, double %arg8, double %arg9, double %arg10, double %arg11, double %arg12, double %arg13, double %arg14, double %arg15, double %arg16, double %arg17, double %arg18, double %arg19, double %arg20, double %arg21, double %arg22, double %arg23, half %arg24, half %arg25, half %arg26, half %arg27, half %arg28, bfloat %arg29)
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
index c50a0fb3..320a3aa 100644
--- a/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-stackmap.ll
@@ -286,8 +286,8 @@ define void @liveConstant() {
; CHECK-NEXT: .half 0
; CHECK-NEXT: .half 28
;
-; Check that at least one is a spilled entry from RBP.
-; Location: Indirect RBP + ...
+; Check that at least one is a spilled entry from SP.
+; Location: Indirect SP + ...
; CHECK: .byte 3
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .half 8
@@ -307,7 +307,7 @@ entry:
; CHECK-NEXT: .half 0
; 1 location
; CHECK-NEXT: .half 1
-; Loc 0: Direct RBP - ofs
+; Loc 0: Direct SP + ofs
; CHECK-NEXT: .byte 2
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .half 8
@@ -320,14 +320,14 @@ entry:
; CHECK-NEXT: .half 0
; 2 locations
; CHECK-NEXT: .half 2
-; Loc 0: Direct RBP - ofs
+; Loc 0: Direct SP + ofs
; CHECK-NEXT: .byte 2
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .half 8
; CHECK-NEXT: .half 2
; CHECK-NEXT: .half 0
; CHECK-NEXT: .word
-; Loc 1: Direct RBP - ofs
+; Loc 1: Direct SP + ofs
; CHECK-NEXT: .byte 2
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .half 8
diff --git a/llvm/test/CodeGen/RISCV/short-forward-branch-opt-mul.ll b/llvm/test/CodeGen/RISCV/short-forward-branch-opt-mul.ll
new file mode 100644
index 0000000..3f780fd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/short-forward-branch-opt-mul.ll
@@ -0,0 +1,156 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefixes=RV32I-M
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefixes=RV64I-M
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+short-forward-branch-opt | \
+; RUN: FileCheck %s --check-prefixes=RV32I-SFB-M
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+short-forward-branch-opt | \
+; RUN: FileCheck %s --check-prefixes=RV64I-SFB-M
+; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+short-forward-branch-i-mul | \
+; RUN: FileCheck %s --check-prefixes=RV32I-SFBIMul-M
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+short-forward-branch-i-mul | \
+; RUN: FileCheck %s --check-prefixes=RV64I-SFBIMul-M
+
+define i32 @select_example_mul_i32(i32 %a, i32 %b, i1 zeroext %x, i32 %y) {
+; RV32I-M-LABEL: select_example_mul_i32:
+; RV32I-M: # %bb.0: # %entry
+; RV32I-M-NEXT: beqz a2, .LBB0_2
+; RV32I-M-NEXT: # %bb.1:
+; RV32I-M-NEXT: mul a1, a0, a3
+; RV32I-M-NEXT: .LBB0_2: # %entry
+; RV32I-M-NEXT: mv a0, a1
+; RV32I-M-NEXT: ret
+;
+; RV64I-M-LABEL: select_example_mul_i32:
+; RV64I-M: # %bb.0: # %entry
+; RV64I-M-NEXT: beqz a2, .LBB0_2
+; RV64I-M-NEXT: # %bb.1:
+; RV64I-M-NEXT: mulw a1, a0, a3
+; RV64I-M-NEXT: .LBB0_2: # %entry
+; RV64I-M-NEXT: mv a0, a1
+; RV64I-M-NEXT: ret
+;
+; RV32I-SFB-M-LABEL: select_example_mul_i32:
+; RV32I-SFB-M: # %bb.0: # %entry
+; RV32I-SFB-M-NEXT: mul a0, a0, a3
+; RV32I-SFB-M-NEXT: bnez a2, .LBB0_2
+; RV32I-SFB-M-NEXT: # %bb.1: # %entry
+; RV32I-SFB-M-NEXT: mv a0, a1
+; RV32I-SFB-M-NEXT: .LBB0_2: # %entry
+; RV32I-SFB-M-NEXT: ret
+;
+; RV64I-SFB-M-LABEL: select_example_mul_i32:
+; RV64I-SFB-M: # %bb.0: # %entry
+; RV64I-SFB-M-NEXT: mulw a0, a0, a3
+; RV64I-SFB-M-NEXT: bnez a2, .LBB0_2
+; RV64I-SFB-M-NEXT: # %bb.1: # %entry
+; RV64I-SFB-M-NEXT: mv a0, a1
+; RV64I-SFB-M-NEXT: .LBB0_2: # %entry
+; RV64I-SFB-M-NEXT: ret
+;
+; RV32I-SFBIMul-M-LABEL: select_example_mul_i32:
+; RV32I-SFBIMul-M: # %bb.0: # %entry
+; RV32I-SFBIMul-M-NEXT: beqz a2, .LBB0_2
+; RV32I-SFBIMul-M-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMul-M-NEXT: mul a1, a0, a3
+; RV32I-SFBIMul-M-NEXT: .LBB0_2: # %entry
+; RV32I-SFBIMul-M-NEXT: mv a0, a1
+; RV32I-SFBIMul-M-NEXT: ret
+;
+; RV64I-SFBIMul-M-LABEL: select_example_mul_i32:
+; RV64I-SFBIMul-M: # %bb.0: # %entry
+; RV64I-SFBIMul-M-NEXT: mulw a0, a0, a3
+; RV64I-SFBIMul-M-NEXT: bnez a2, .LBB0_2
+; RV64I-SFBIMul-M-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMul-M-NEXT: mv a0, a1
+; RV64I-SFBIMul-M-NEXT: .LBB0_2: # %entry
+; RV64I-SFBIMul-M-NEXT: ret
+entry:
+ %res = mul i32 %a, %y
+ %sel = select i1 %x, i32 %res, i32 %b
+ ret i32 %sel
+}
+
+define i64 @select_example_mul_i64(i64 %a, i64 %b, i1 zeroext %x, i64 %y) {
+; RV32I-M-LABEL: select_example_mul_i64:
+; RV32I-M: # %bb.0: # %entry
+; RV32I-M-NEXT: beqz a4, .LBB1_2
+; RV32I-M-NEXT: # %bb.1:
+; RV32I-M-NEXT: mul a2, a0, a6
+; RV32I-M-NEXT: mulhu a3, a0, a5
+; RV32I-M-NEXT: mul a1, a1, a5
+; RV32I-M-NEXT: add a2, a3, a2
+; RV32I-M-NEXT: add a3, a2, a1
+; RV32I-M-NEXT: mul a2, a0, a5
+; RV32I-M-NEXT: .LBB1_2: # %entry
+; RV32I-M-NEXT: mv a0, a2
+; RV32I-M-NEXT: mv a1, a3
+; RV32I-M-NEXT: ret
+;
+; RV64I-M-LABEL: select_example_mul_i64:
+; RV64I-M: # %bb.0: # %entry
+; RV64I-M-NEXT: beqz a2, .LBB1_2
+; RV64I-M-NEXT: # %bb.1:
+; RV64I-M-NEXT: mul a1, a0, a3
+; RV64I-M-NEXT: .LBB1_2: # %entry
+; RV64I-M-NEXT: mv a0, a1
+; RV64I-M-NEXT: ret
+;
+; RV32I-SFB-M-LABEL: select_example_mul_i64:
+; RV32I-SFB-M: # %bb.0: # %entry
+; RV32I-SFB-M-NEXT: mul a6, a0, a6
+; RV32I-SFB-M-NEXT: mulhu a7, a0, a5
+; RV32I-SFB-M-NEXT: mul a1, a1, a5
+; RV32I-SFB-M-NEXT: mul a0, a0, a5
+; RV32I-SFB-M-NEXT: add a6, a7, a6
+; RV32I-SFB-M-NEXT: beqz a4, .LBB1_2
+; RV32I-SFB-M-NEXT: # %bb.1: # %entry
+; RV32I-SFB-M-NEXT: add a3, a6, a1
+; RV32I-SFB-M-NEXT: .LBB1_2: # %entry
+; RV32I-SFB-M-NEXT: bnez a4, .LBB1_4
+; RV32I-SFB-M-NEXT: # %bb.3: # %entry
+; RV32I-SFB-M-NEXT: mv a0, a2
+; RV32I-SFB-M-NEXT: .LBB1_4: # %entry
+; RV32I-SFB-M-NEXT: mv a1, a3
+; RV32I-SFB-M-NEXT: ret
+;
+; RV64I-SFB-M-LABEL: select_example_mul_i64:
+; RV64I-SFB-M: # %bb.0: # %entry
+; RV64I-SFB-M-NEXT: mul a0, a0, a3
+; RV64I-SFB-M-NEXT: bnez a2, .LBB1_2
+; RV64I-SFB-M-NEXT: # %bb.1: # %entry
+; RV64I-SFB-M-NEXT: mv a0, a1
+; RV64I-SFB-M-NEXT: .LBB1_2: # %entry
+; RV64I-SFB-M-NEXT: ret
+;
+; RV32I-SFBIMul-M-LABEL: select_example_mul_i64:
+; RV32I-SFBIMul-M: # %bb.0: # %entry
+; RV32I-SFBIMul-M-NEXT: mul a6, a0, a6
+; RV32I-SFBIMul-M-NEXT: mulhu a7, a0, a5
+; RV32I-SFBIMul-M-NEXT: mul a1, a1, a5
+; RV32I-SFBIMul-M-NEXT: add a6, a7, a6
+; RV32I-SFBIMul-M-NEXT: beqz a4, .LBB1_2
+; RV32I-SFBIMul-M-NEXT: # %bb.1: # %entry
+; RV32I-SFBIMul-M-NEXT: add a3, a6, a1
+; RV32I-SFBIMul-M-NEXT: .LBB1_2: # %entry
+; RV32I-SFBIMul-M-NEXT: beqz a4, .LBB1_4
+; RV32I-SFBIMul-M-NEXT: # %bb.3: # %entry
+; RV32I-SFBIMul-M-NEXT: mul a2, a0, a5
+; RV32I-SFBIMul-M-NEXT: .LBB1_4: # %entry
+; RV32I-SFBIMul-M-NEXT: mv a0, a2
+; RV32I-SFBIMul-M-NEXT: mv a1, a3
+; RV32I-SFBIMul-M-NEXT: ret
+;
+; RV64I-SFBIMul-M-LABEL: select_example_mul_i64:
+; RV64I-SFBIMul-M: # %bb.0: # %entry
+; RV64I-SFBIMul-M-NEXT: beqz a2, .LBB1_2
+; RV64I-SFBIMul-M-NEXT: # %bb.1: # %entry
+; RV64I-SFBIMul-M-NEXT: mul a1, a0, a3
+; RV64I-SFBIMul-M-NEXT: .LBB1_2: # %entry
+; RV64I-SFBIMul-M-NEXT: mv a0, a1
+; RV64I-SFBIMul-M-NEXT: ret
+entry:
+ %res = mul i64 %a, %y
+ %sel = select i1 %x, i64 %res, i64 %b
+ ret i64 %sel
+}
+