; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

declare <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, i32, <vscale x 2 x i1>, i32, i32)
declare <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i32, <vscale x 1 x i1>, i32, i32)
declare <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, i32, <vscale x 2 x i1>, i32, i32)
declare <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, i32, <vscale x 4 x i1>, i32, i32)
declare <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i32, <vscale x 8 x i1>, i32, i32)
declare <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, i32, <vscale x 1 x i1>, i32, i32)
declare <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, i32, <vscale x 2 x i1>, i32, i32)
declare <vscale x 16 x i64> @llvm.experimental.vp.splice.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, i32, <vscale x 16 x i1>, i32, i32)
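
; Positive immediate splice: the first operand is slid down by the immediate
; with an EVL of evla-5, then the second operand is slid up to position
; evla-5 under an EVL of evlb. The same two-slide sequence repeats for every
; element type and LMUL below.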
define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i64> %v
}
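
; Negative immediate splice (offset -5): the window starts 5 elements before
; the end of the first operand's effective length, so the slide-down amount
; evla-5 comes from a register and the slide-up uses the immediate 5.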
define <vscale x 2 x i64> @test_vp_splice_nxv2i64_negative_offset(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i64_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 5
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i64> %v
}
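
; Masked splice: %mask is passed through to both slides (v0.t), and the
; vslideup switches to mask-undisturbed (mu) so that masked-off lanes keep
; the values produced by the preceding slide-down.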
define <vscale x 2 x i64> @test_vp_splice_nxv2i64_masked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i64> %v
}

define <vscale x 1 x i64> @test_vp_splice_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @test_vp_splice_nxv1i64_negative_offset(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i64_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 5
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @test_vp_splice_nxv1i64_masked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 1 x i64> %v
}

define <vscale x 2 x i32> @test_vp_splice_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @test_vp_splice_nxv2i32_negative_offset(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i32_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 5
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i32> %v
}

define <vscale x 2 x i32> @test_vp_splice_nxv2i32_masked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i32_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i32> %v
}

define <vscale x 4 x i16> @test_vp_splice_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i16_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 5
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 -5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @test_vp_splice_nxv4i16_masked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i16_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.experimental.vp.splice.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 5, <vscale x 4 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 4 x i16> %v
}

define <vscale x 8 x i8> @test_vp_splice_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i8_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 5
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 -5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @test_vp_splice_nxv8i8_masked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i8_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.experimental.vp.splice.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 5, <vscale x 8 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 8 x i8> %v
}

define <vscale x 1 x double> @test_vp_splice_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @test_vp_splice_nxv1f64_negative_offset(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1f64_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 5
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 1 x double> %v
}

define <vscale x 1 x double> @test_vp_splice_nxv1f64_masked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1f64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 1 x double> %v
}

define <vscale x 2 x float> @test_vp_splice_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @test_vp_splice_nxv2f32_negative_offset(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2f32_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 5
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 2 x float> %v
}

define <vscale x 2 x float> @test_vp_splice_nxv2f32_masked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2f32_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 2 x float> %v
}