; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+v,+zvfh,+zvfbfmin -verify-machineinstrs -riscv-v-vector-bits-min=128 \
; RUN:   < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+v,+zvfhmin,+zvfbfmin -verify-machineinstrs -riscv-v-vector-bits-min=128 \
; RUN:   < %s | FileCheck %s

define <2 x double> @test_vp_reverse_v2f64_masked(<2 x double> %src, <2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v2f64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <2 x double> @llvm.experimental.vp.reverse.v2f64(<2 x double> %src, <2 x i1> %mask, i32 %evl)
  ret <2 x double> %dst
}

define <2 x double> @test_vp_reverse_v2f64(<2 x double> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <2 x double> @llvm.experimental.vp.reverse.v2f64(<2 x double> %src, <2 x i1> splat (i1 1), i32 %evl)
  ret <2 x double> %dst
}

define <4 x float> @test_vp_reverse_v4f32_masked(<4 x float> %src, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4f32_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <4 x float> @llvm.experimental.vp.reverse.v4f32(<4 x float> %src, <4 x i1> %mask, i32 %evl)
  ret <4 x float> %dst
}

define <4 x float> @test_vp_reverse_v4f32(<4 x float> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <4 x float> @llvm.experimental.vp.reverse.v4f32(<4 x float> %src, <4 x i1> splat (i1 1), i32 %evl)
  ret <4 x float> %dst
}

define <4 x half> @test_vp_reverse_v4f16_masked(<4 x half> %src, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4f16_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <4 x half> @llvm.experimental.vp.reverse.v4f16(<4 x half> %src, <4 x i1> %mask, i32 %evl)
  ret <4 x half> %dst
}

define <4 x half> @test_vp_reverse_v4f16(<4 x half> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <4 x half> @llvm.experimental.vp.reverse.v4f16(<4 x half> %src, <4 x i1> splat (i1 1), i32 %evl)
  ret <4 x half> %dst
}

define <4 x bfloat> @test_vp_reverse_v4bf16_masked(<4 x bfloat> %src, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4bf16_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <4 x bfloat> @llvm.experimental.vp.reverse.v4bf16(<4 x bfloat> %src, <4 x i1> %mask, i32 %evl)
  ret <4 x bfloat> %dst
}

define <4 x bfloat> @test_vp_reverse_v4bf16(<4 x bfloat> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4bf16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <4 x bfloat> @llvm.experimental.vp.reverse.v4bf16(<4 x bfloat> %src, <4 x i1> splat (i1 1), i32 %evl)
  ret <4 x bfloat> %dst
}
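
; As the CHECK lines above show, every case lowers to the same gather-based
; sequence: vid.v materializes the indices 0..evl-1, vrsub.vx with evl-1
; flips them to evl-1..0, and vrgather.vv performs the actual reverse. Only
; the element width (e64/e32/e16), the mask operand (v0.t in the masked
; variants), and the final move (vmv.v.v at m1 vs. whole-register vmv1r.v at
; mf2) differ between functions.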