; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s

; The vlseg2 intrinsic operates on a 2-field tuple of <vscale x 8 x i8>
; (one LMUL=1 register per field); the element SEW is passed as the final
; i64 argument (log2 of SEW in bits: 3=e8, 4=e16, 5=e32, 6=e64).
declare target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, i64)

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv8i8(ptr %p, i64 %vl) {
; CHECK-LABEL: name: test_vlseg_nxv8i8
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $x10, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[PseudoVLSEG2E8_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 1)
; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY killed [[PseudoVLSEG2E8_V_M1_]]
; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr %p, i64 %vl, i64 3)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv4i16(ptr %p, i64 %vl) {
; CHECK-LABEL: name: test_vlseg_nxv4i16
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $x10, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[PseudoVLSEG2E16_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16_V_M1 $noreg, [[COPY1]], [[COPY]], 4 /* e16 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 2)
; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY killed [[PseudoVLSEG2E16_V_M1_]]
; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr %p, i64 %vl, i64 4)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv2i32(ptr %p, i64 %vl) {
; CHECK-LABEL: name: test_vlseg_nxv2i32
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $x10, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[PseudoVLSEG2E32_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32_V_M1 $noreg, [[COPY1]], [[COPY]], 5 /* e32 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 4)
; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY killed [[PseudoVLSEG2E32_V_M1_]]
; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr %p, i64 %vl, i64 5)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}

define target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @test_vlseg_nxv1i64(ptr %p, i64 %vl) {
; CHECK-LABEL: name: test_vlseg_nxv1i64
; CHECK: bb.0.entry:
; CHECK-NEXT:   liveins: $x10, $x11
; CHECK-NEXT: {{  $}}
; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT:   [[PseudoVLSEG2E64_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64_V_M1 $noreg, [[COPY1]], [[COPY]], 6 /* e64 */, 2 /* tu, ma */ :: (load unknown-size from %ir.p, align 8)
; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY killed [[PseudoVLSEG2E64_V_M1_]]
; CHECK-NEXT:   $v8_v9 = COPY [[COPY2]]
; CHECK-NEXT:   PseudoRET implicit $v8_v9
entry:
  %0 = call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) poison, ptr %p, i64 %vl, i64 6)
  ret target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0
}